@@ -349,7 +349,7 @@ def report_loss(self, epoch, loss_dict):
349
349
350
350
log_file_path = os .path .join (self .output_directory_path , 'reporter.log' )
351
351
df_new = pd .DataFrame .from_dict (loss_dict , orient = 'index' ).T
352
- df_new = df_new .mul (100 ) # Multiple each loss component by 100
352
+ df_new = df_new .mul (100 ) # Multiply each loss component by 100. Is this large enough?
353
353
df_new .insert (0 , 'epoch' , epoch )
354
354
355
355
if os .path .exists (log_file_path ):
@@ -536,7 +536,6 @@ def train_sampler(self, sampler_patience=800, neff_threshold=0.2, sampler_weight
536
536
sampler = SamplerReweight .samplers [sampler_index ]
537
537
loss += sampler_loss * sampler_weight
538
538
loss_dict [f'{ sampler .target_name } ' ] = sampler_loss .item ()
539
-
540
539
loss .backward ()
541
540
loss_dict ['neff' ] = neff_min
542
541
@@ -626,7 +625,7 @@ def _save_local_model(self, epoch, net_copy):
626
625
_logger .info (f'Save ckpt{ epoch } .pt as temporary espaloma model (net.pt)' )
627
626
self ._save_checkpoint (epoch )
628
627
local_model = os .path .join (self .output_directory_path , f"ckpt{ epoch } .pt" )
629
- self .save_model (net = net_copy , best_model = local_model , model_name = f"net.pt" , output_directory_path = self .output_directory_path )
628
+ self .save_model (net = net_copy , checkpoint_file = local_model , output_model = f"net.pt" , output_directory_path = self .output_directory_path )
630
629
631
630
632
631
def _setup_local_samplers (self , epoch , net_copy , debug ):
0 commit comments