diff --git a/api/Neural_Network2.py b/api/Neural_Network2.py
index 09a8cf63..4cb9f08b 100644
--- a/api/Neural_Network2.py
+++ b/api/Neural_Network2.py
@@ -6,6 +6,7 @@
 import tensorflow as tf
 import os
 import joblib
+import keras
 
 from sklearn.feature_extraction.text import TfidfVectorizer
@@ -28,54 +29,32 @@ def preprocess_text(text):
     return text
 
 class TrainingProgressCallback(Callback):
-    def __init__(self):
+    def __init__(self, epochs):
         super(TrainingProgressCallback, self).__init__()
-        self.batch_count = 0
+        self.epochs = epochs
+        self.epoch_num = 0
 
-    def on_batch_end(self, batch, logs=None):
-        print("batch end")
-        self.batch_count += 1
-        if self.batch_count % 50 == 0:
-            self.update_progress(logs)
-
-    def on_epoch_end(self, epoch, logs=None):
-        print("epoch end")
-        self.update_progress(logs)
-
-    def update_progress(self, logs):
-        print("updating progress")
-        total_epochs = self.params['epochs']
-        current_batch = self.model._train_counter
-        total_batches = self.params['steps'] * total_epochs
-        percent_complete = int((current_batch / total_batches) * 100)
-
-        # Set the training status to True
-        training_in_progress = True
-
-        # Check whether the current epoch is the last one
-        if current_batch == total_batches:
-            training_in_progress = False
-
-        # Save the progress to a JSON file
-        training_progress = {
-            'training_progress': percent_complete,
-            'training_in_progress': training_in_progress
-        }
-        training_progress2 = {
-            'training_progress': percent_complete,
-            'training_in_progress': training_in_progress,
-            'epochs': total_epochs,
-            'total_batches': total_batches,
-            'current_batch': current_batch
-        }
+    def on_epoch_begin(self, epoch, logs=None):
+        self.epoch_step = 0
+        self.epoch_num += 1
 
-        print(training_progress2)
+    def on_batch_end(self, batch, logs=None):
+        self.epoch_step += 1
+        progress = self.epoch_step / self.params["steps"] / self.epochs * 100 + self.epoch_num / self.epochs * 100
+        training_progress = {
+            'training_progress': progress,
+            'training_in_progress': True
+        }
         with open('training_progress.json', 'w') as file:
             json.dump(training_progress, file)
+        # print(progress)
+        # with open('training_progress.json', 'w') as file:
+        #     json.dump(training_progress, file)
 
-def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size=32):
+
+def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size=32, learning_rate=0.001):
     label_encoder = LabelEncoder()
     train_labels_encoded = label_encoder.fit_transform(train_labels)
@@ -103,19 +82,14 @@ def create_and_train_model(train_texts, train_labels, name, epochs=5, batch_size=32):
     ])
 
-    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+    model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(learning_rate=learning_rate), metrics=["accuracy"])
 
     try:
-        progress_callback = TrainingProgressCallback()
+        progress_callback = TrainingProgressCallback(epochs=epochs)
 
-        print("train")
-        print(train_dataset)
-        print("epochs")
-        print(epochs)
-        print("batch_size")
-        print(batch_size)
-        # history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[progress_callback])
-        history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2)
+        # history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2)
+
+        history = model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=2, callbacks=[progress_callback])
 
         model_filename = f"api/models/{str(num_classes)}-Trained-Model-{name}.weights.h5"
         model.save_weights(model_filename)
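Note on the hunk above (a sketch, not part of the patch): epoch_num is incremented in on_epoch_begin, so on_batch_end already counts the running epoch as finished and the value written to training_progress.json exceeds 100% (about 120% at the end of a 5-epoch run). A clamped variant might look like the following; the class name is hypothetical, and it assumes self.params["steps"] is populated, which Keras does when fitting a tf.data dataset.

    import json
    from tensorflow.keras.callbacks import Callback

    class EpochProgressCallback(Callback):
        """Hypothetical variant of TrainingProgressCallback with progress clamped to 0-100."""

        def __init__(self, epochs, path="training_progress.json"):
            super().__init__()
            self.epochs = epochs
            self.path = path
            self.completed_epochs = 0
            self.epoch_step = 0

        def on_epoch_begin(self, epoch, logs=None):
            self.epoch_step = 0

        def on_train_batch_end(self, batch, logs=None):
            self.epoch_step += 1
            steps = self.params.get("steps") or 1  # set by Keras when the step count is known
            # work done = fully finished epochs + fraction of the current epoch
            done = self.completed_epochs + min(self.epoch_step / steps, 1.0)
            progress = min(done / self.epochs * 100, 100)
            with open(self.path, "w") as f:
                json.dump({"training_progress": progress,
                           "training_in_progress": progress < 100}, f)

        def on_epoch_end(self, epoch, logs=None):
            self.completed_epochs += 1

Writing the JSON on every batch is kept here to match the patch; throttling to every N batches would reduce file churn.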
f"api/models/{str(num_classes)}-Trained-Model-{name}.weights.h5" model.save_weights(model_filename) diff --git a/api/app.py b/api/app.py index d480e789..625e5440 100644 --- a/api/app.py +++ b/api/app.py @@ -4,6 +4,7 @@ from Neural_Network2 import create_and_train_model from available_classifiers import get_available_classifiers +import time import os import atexit import threading @@ -15,9 +16,6 @@ nltk.download('wordnet') -# log = logging.getLogger('werkzeug') -# log.setLevel(logging.ERROR) - app = Flask(__name__) server_thread = None CORS(app) # Permite todas as origens por padrão (não recomendado para produção) @@ -94,8 +92,7 @@ def train_model(): with open('training_progress.json', 'w') as file: json.dump(training_progress, file) - print("Beginning training") - create_and_train_model(selected_data, selected_label, name, epochs, batch_size) + create_and_train_model(selected_data, selected_label, name, epochs, batch_size, learning_rate) return jsonify({"message": "Model train started successfully."}), 200 @@ -106,7 +103,16 @@ def get_training_status(): try: data = json.load(file) except json.decoder.JSONDecodeError: - return jsonify({'training_in_progress': True, 'training_progress': 0}) + try: + time.sleep(1) + data = json.load(file) + except json.decoder.JSONDecodeError: + try: + time.sleep(1) + data = json.load(file) + except json.decoder.JSONDecodeError: + print("error!") + return jsonify({'training_in_progress': True, 'training_progress': 0}) training_status = data.get('training_in_progress', False) progress = data.get('training_progress', 0) return jsonify({'training_in_progress': training_status, 'training_progress': progress}) @@ -114,11 +120,5 @@ def get_training_status(): return jsonify({'training_in_progress': False, 'training_progress': 0}) -#@app.teardown_appcontext -#def teardown_appcontext(error=None): - #shutdown_server() - if __name__ == '__main__': - app.run(host='127.0.0.1', port=5000, debug=True) - #server_thread = threading.Thread(target=run_flask_app) - #server_thread.start() \ No newline at end of file + app.run(host='127.0.0.1', port=5000, debug=True) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 14b780e3..373f6b42 100644 Binary files a/requirements.txt and b/requirements.txt differ diff --git a/src/pages/train.tsx b/src/pages/train.tsx index 6f83486b..58ab3a6d 100644 --- a/src/pages/train.tsx +++ b/src/pages/train.tsx @@ -51,35 +51,19 @@ export default function Train() { let retryCount = 0; - // const url = "http://localhost:5000/neural-network"; - - - // async function postData(url: string, data: { data: any[]; label: any[]; batch_size: number; epochs: number; learning_rate: number; name: string; }) { - // try { - // const response = await axios.post(url, data); - // } catch (error) { - // if (retryCount < maxRetries) { - // retryCount++; - // console.error(`Error occurred, retrying (attempt ${retryCount})...`); - // postData(url, data); // Retry recursively - // } else { - // console.error("Max retry limit reached. 
Unable to post data."); - // throw error; // Throw the error after maximum retries - // } - // } - // } + const url = "http://localhost:5000/neural-network"; await axios - .post("http://localhost:5000/neural-network", sendData) + .post(url, sendData) .catch(async (error) => { await axios - .post("http://localhost:5000/neural-network", sendData) + .post(url, sendData) .catch(async (error) => { await axios - .post("http://localhost:5000/neural-network", sendData) + .post(url, sendData) .catch(async (error) => { await axios - .post("http://localhost:5000/neural-network", sendData) + .post(url, sendData) .catch((error) => { console.error(error.response.data); })