
Commit

Progress bar
cmaloney111 committed Apr 1, 2024
1 parent 2f386c3 commit c43bbc0
Showing 4 changed files with 42 additions and 84 deletions.
74 changes: 24 additions & 50 deletions api/Neural_Network2.py
@@ -6,6 +6,7 @@
import tensorflow as tf
import os
import joblib
import keras

from sklearn.feature_extraction.text import TfidfVectorizer

@@ -28,54 +29,32 @@ def preprocess_text(text):
return text

class TrainingProgressCallback(Callback):
def __init__(self):
def __init__(self, epochs):
super(TrainingProgressCallback, self).__init__()
self.batch_count = 0
self.epochs = epochs
self.epoch_num = 0

def on_batch_end(self, batch, logs=None):
print("batch end")
self.batch_count += 1
if self.batch_count % 50 == 0:
self.update_progress(logs)

def on_epoch_end(self, epoch, logs=None):
print("epoch end")
self.update_progress(logs)

def update_progress(self, logs):
print("updating progress")
total_epochs = self.params['epochs']
current_batch = self.model._train_counter
total_batches = self.params['steps'] * total_epochs
percent_complete = int((current_batch / total_batches) * 100)

# Set the training status to True
training_in_progress = True

# Check whether the current epoch is the last one
if current_batch == total_batches:
training_in_progress = False

# Save the progress to a JSON file
training_progress = {
'training_progress': percent_complete,
'training_in_progress': training_in_progress
}

training_progress2 = {
'training_progress': percent_complete,
'training_in_progress': training_in_progress,
'epochs': total_epochs,
'total_batches': total_batches,
'current_batch': current_batch
}
def on_epoch_begin(self, epoch, logs=None):
self.epoch_step = 0
self.epoch_num += 1

print(training_progress2)
def on_batch_end(self, batch, logs=None):
self.epoch_step += 1
progress = self.epoch_step / self.params["steps"] / self.epochs * 100 + self.epoch_num / self.epochs * 100

training_progress = {
'training_progress': progress,
'training_in_progress': True
}
with open('training_progress.json', 'w') as file:
json.dump(training_progress, file)
# print(progress)
# with open('training_progress.json', 'w') as file:
# json.dump(training_progress, file)
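
The added callback derives a percentage from the Keras steps-per-epoch value (self.params["steps"]), the batch counter, and the epoch counter. A minimal, self-contained sketch of this kind of calculation, with an illustrative helper name and zero-based indices that are not part of the commit:

def training_progress_percent(epoch_index, batch_index, steps_per_epoch, total_epochs):
    # Batches finished in earlier epochs plus batches finished so far in this epoch,
    # expressed as a percentage of all batches in the run.
    batches_done = epoch_index * steps_per_epoch + (batch_index + 1)
    return batches_done / (steps_per_epoch * total_epochs) * 100

# e.g. epoch 2 of 5, batch 10 of 50: (1 * 50 + 10) / 250 * 100 = 24.0
print(training_progress_percent(epoch_index=1, batch_index=9, steps_per_epoch=50, total_epochs=5))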

def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size=32):

def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size=32, learning_rate=0.001):
label_encoder = LabelEncoder()
train_labels_encoded = label_encoder.fit_transform(train_labels)

@@ -103,19 +82,14 @@ def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size
])


model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(learning_rate=learning_rate), metrics=["accuracy"])

try:
progress_callback = TrainingProgressCallback()
progress_callback = TrainingProgressCallback(epochs=epochs)

print("train")
print(train_dataset)
print("epochs")
print(epochs)
print("batch_size")
print(batch_size)
# history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[progress_callback])
history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2)
# history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2)

history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[progress_callback])

model_filename = f"api/models/{str(num_classes)}-Trained-Model-{name}.weights.h5"
model.save_weights(model_filename)
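
For reference, a hypothetical call matching the updated signature; the texts, labels, and name are placeholders, and whether such a tiny dataset trains cleanly depends on code outside this hunk:

# Placeholder data; only the keyword names mirror the new signature.
create_and_train_model(
    train_texts=["great product", "terrible service", "okay overall"],
    train_labels=["positive", "negative", "neutral"],
    name="demo",
    epochs=5,
    batch_size=32,
    learning_rate=0.001,
)
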
26 changes: 13 additions & 13 deletions api/app.py
@@ -4,6 +4,7 @@
from Neural_Network2 import create_and_train_model
from available_classifiers import get_available_classifiers

import time
import os
import atexit
import threading
@@ -15,9 +16,6 @@
nltk.download('wordnet')


# log = logging.getLogger('werkzeug')
# log.setLevel(logging.ERROR)

app = Flask(__name__)
server_thread = None
CORS(app) # Allows all origins by default (not recommended for production)
@@ -94,8 +92,7 @@ def train_model():
with open('training_progress.json', 'w') as file:
json.dump(training_progress, file)

print("Beginning training")
create_and_train_model(selected_data, selected_label, name, epochs, batch_size)
create_and_train_model(selected_data, selected_label, name, epochs, batch_size, learning_rate)

return jsonify({"message": "Model train started successfully."}), 200

@@ -106,19 +103,22 @@ def get_training_status():
try:
data = json.load(file)
except json.decoder.JSONDecodeError:
return jsonify({'training_in_progress': True, 'training_progress': 0})
try:
time.sleep(1)
data = json.load(file)
except json.decoder.JSONDecodeError:
try:
time.sleep(1)
data = json.load(file)
except json.decoder.JSONDecodeError:
print("error!")
return jsonify({'training_in_progress': True, 'training_progress': 0})
training_status = data.get('training_in_progress', False)
progress = data.get('training_progress', 0)
return jsonify({'training_in_progress': training_status, 'training_progress': progress})
except FileNotFoundError:
return jsonify({'training_in_progress': False, 'training_progress': 0})
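
The endpoint retries with a short sleep when training_progress.json cannot be parsed yet. On the client side, polling could look like the sketch below; the /training-status URL is an assumption, since the route decorator is not part of this diff:

import time
import requests

STATUS_URL = "http://localhost:5000/training-status"  # assumed URL

def wait_for_training(poll_seconds=1.0):
    # Poll the status endpoint until the backend reports training has finished.
    while True:
        status = requests.get(STATUS_URL).json()
        print(f"progress: {status['training_progress']:.1f}%")
        if not status["training_in_progress"]:
            return status
        time.sleep(poll_seconds)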


#@app.teardown_appcontext
#def teardown_appcontext(error=None):
#shutdown_server()

if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
#server_thread = threading.Thread(target=run_flask_app)
#server_thread.start()
app.run(host='127.0.0.1', port=5000, debug=True)
Binary file modified requirements.txt
Binary file not shown.
26 changes: 5 additions & 21 deletions src/pages/train.tsx
@@ -51,35 +51,19 @@ export default function Train() {
let retryCount = 0;


// const url = "http://localhost:5000/neural-network";


// async function postData(url: string, data: { data: any[]; label: any[]; batch_size: number; epochs: number; learning_rate: number; name: string; }) {
// try {
// const response = await axios.post(url, data);
// } catch (error) {
// if (retryCount < maxRetries) {
// retryCount++;
// console.error(`Error occurred, retrying (attempt ${retryCount})...`);
// postData(url, data); // Retry recursively
// } else {
// console.error("Max retry limit reached. Unable to post data.");
// throw error; // Throw the error after maximum retries
// }
// }
// }
const url = "http://localhost:5000/neural-network";

await axios
.post("http://localhost:5000/neural-network", sendData)
.post(url, sendData)
.catch(async (error) => {
await axios
.post("http://localhost:5000/neural-network", sendData)
.post(url, sendData)
.catch(async (error) => {
await axios
.post("http://localhost:5000/neural-network", sendData)
.post(url, sendData)
.catch(async (error) => {
await axios
.post("http://localhost:5000/neural-network", sendData)
.post(url, sendData)
.catch((error) => {
console.error(error.response.data);
})
