@@ -76,7 +76,7 @@ def optimize_model(
         cutoff_bad_trials (int): After how many bad trials (performance below threshold),
             should model pruning / weight sharing stop
         directory (string): Directory to store temporary results
-        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
+        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and Manual
         knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
             default usually works well; for very large networks, greedy algorithm might be more suitable
         regularization_range (list): List of suitable hyperparameters for weight decay
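
For context on the knapsack_solver option, a greedy knapsack heuristic picks items by value-to-weight ratio instead of solving the problem exactly, which is why it scales better to very large networks. The sketch below is illustrative only; it is not the solver implementation from this repository, and the function and argument names are made up.

    # Illustrative greedy knapsack heuristic (not this repository's solver).
    def greedy_knapsack(values, weights, capacity):
        # Sort items by value-to-weight ratio, descending, then take greedily.
        order = sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True)
        chosen, total_weight = [], 0
        for i in order:
            if total_weight + weights[i] <= capacity:
                chosen.append(i)
                total_weight += weights[i]
        return chosen

    # Example: three items with values [10, 6, 8], weights [5, 4, 6], capacity 10.
    print(greedy_knapsack([10, 6, 8], [5, 4, 6], 10))  # -> [0, 1]
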
@@ -232,10 +232,10 @@ def optimize_model(
         if verbose:
             val_res = optimizable_model.evaluate(validation_dataset, verbose=0, return_dict=False)
             t = time.time() - start_time
-            avg_loss = round(epoch_loss_avg.result(), 3)
-            print(f'Epoch: {epoch + 1} - Time: {t}s - Average training loss: {avg_loss}')
-            print(f'Epoch: {epoch + 1} - learning_rate: {optimizable_model.optimizer.learning_rate.numpy()}')
-            print(f'Epoch: {epoch + 1} - Validation loss: {val_res[0]} - Performance on validation set: {val_res[1]}')
+            avg_loss = epoch_loss_avg.result()
+            tf.print(f'Epoch: {epoch + 1} - Time: {t}s - Average training loss: {avg_loss}')
+            tf.print(f'Epoch: {epoch + 1} - learning_rate: {optimizable_model.optimizer.learning_rate.numpy()}')
+            tf.print(f'Epoch: {epoch + 1} - Validation loss: {val_res[0]} - Performance on validation set: {val_res[1]}')
 
     # Check if model works after pruning
     pruned_performance = optimizable_model.evaluate(validation_dataset, verbose=0, return_dict=False)[-1]
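
The second hunk swaps the built-in print for tf.print. One relevant property of tf.print, shown in the minimal, self-contained sketch below (an assumed example, not code from this repository): inside a tf.function, Python's print runs only once at trace time, whereas tf.print executes on every call and prints the actual tensor values.

    import tensorflow as tf

    # Illustration of print vs. tf.print inside graph-compiled code.
    @tf.function
    def training_step(x):
        print('built-in print: runs once, while tracing')          # trace-time only
        tf.print('tf.print: average value =', tf.reduce_mean(x))   # runs every call
        return x * 2.0

    training_step(tf.constant([1.0, 2.0]))
    training_step(tf.constant([3.0, 4.0]))  # only the tf.print line appears again

In eager code the two behave similarly, but tf.print also formats tensor values directly, so logging statements keep working if the surrounding loop is later wrapped in a tf.function.
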