diff --git a/lab1/Part2_Music_Generation.ipynb b/lab1/Part2_Music_Generation.ipynb
index 010684a1..7844d514 100644
--- a/lab1/Part2_Music_Generation.ipynb
+++ b/lab1/Part2_Music_Generation.ipynb
@@ -71,7 +71,7 @@
       "source": [
         "%pip install comet_ml\n",
         "import comet_ml\n",
-        "comet_ml.init(project_name=\"6.s191lab1.2\")\n",
+        "comet_ml.init(project_name=\"6.s191lab1_part2\")\n",
         "comet_experiment = comet_ml.Experiment()\n",
         "\n",
         "# Import Tensorflow 2.0\n",
@@ -93,7 +93,9 @@
         "\n",
         "# Check that we are using a GPU, if not switch runtimes\n",
         "# using Runtime > Change Runtime Type > GPU\n",
-        "assert len(tf.config.list_physical_devices('GPU')) > 0"
+        "assert len(tf.config.list_physical_devices('GPU')) > 0\n",
+        "\n",
+        "from scipy.io.wavfile import write"
       ]
     },
     {
@@ -735,8 +737,7 @@
         "    model.save_weights(checkpoint_prefix)\n",
         "    \n",
         "# Save the trained model and the weights\n",
-        "model.save_weights(checkpoint_prefix)\n",
-        "comet_experiment.end()\n"
+        "model.save_weights(checkpoint_prefix)\n"
       ]
     },
     {
@@ -906,7 +907,14 @@
         "  # If its a valid song (correct syntax), lets play it! \n",
         "  if waveform:\n",
         "    print(\"Generated song\", i)\n",
-        "    ipythondisplay.display(waveform)"
+        "    ipythondisplay.display(waveform)\n",
+        "\n",
+        "    numeric_data = np.frombuffer(waveform.data, dtype=np.int16)\n",
+        "    wav_file_path = f\"output_{i}.wav\"\n",
+        "    write(wav_file_path, 88200, numeric_data)\n",
+        "    comet_experiment.log_asset(wav_file_path)\n",
+        "\n",
+        "comet_experiment.end()"
       ]
     },
     {