Commit f43776d

Fix prints in DSP-aware pruning (#1396)
1 parent a57be2a commit f43776d

2 files changed: 6 additions, 6 deletions


hls4ml/optimization/dsp_aware_pruning/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ def optimize_keras_model_for_hls4ml(
         cutoff_bad_trials (int): After how many bad trials (performance below threshold),
             should model pruning / weight sharing stop
         directory (string): Directory to store temporary results
-        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
+        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and Manual
         knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
             default usually works well; for very large networks, greedy algorithm might be more suitable
         regularization_range (list): List of suitable hyperparameters for weight decay
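For context, the three accepted tuner names correspond to different hyperparameter search strategies. The sketch below is a hypothetical illustration of how such a string could be dispatched to the keras_tuner package; it is not the hls4ml implementation, and the helper name make_tuner and its arguments are assumptions made for the example.

# Hypothetical dispatch for a 'Bayesian' / 'Hyperband' / 'Manual' tuner string.
# Illustration only -- not the hls4ml implementation.
import keras_tuner as kt

def make_tuner(tuner, build_model, objective, directory):
    if tuner == 'Bayesian':
        # Bayesian optimization over the search space defined in build_model
        return kt.BayesianOptimization(build_model, objective=objective, directory=directory)
    if tuner == 'Hyperband':
        # Successive-halving style search with early stopping of weak trials
        return kt.Hyperband(build_model, objective=objective, directory=directory)
    if tuner == 'Manual':
        # No automated search; the caller supplies hyperparameters directly
        return None
    raise ValueError(f'Unsupported tuner: {tuner}')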

hls4ml/optimization/dsp_aware_pruning/keras/__init__.py

Lines changed: 5 additions & 5 deletions
@@ -76,7 +76,7 @@ def optimize_model(
         cutoff_bad_trials (int): After how many bad trials (performance below threshold),
             should model pruning / weight sharing stop
         directory (string): Directory to store temporary results
-        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and None
+        tuner (str): Tuning algorithm, choose between Bayesian, Hyperband and Manual
         knapsack_solver (str): Algorithm to solve Knapsack problem when optimizing;
             default usually works well; for very large networks, greedy algorithm might be more suitable
         regularization_range (list): List of suitable hyperparameters for weight decay
@@ -232,10 +232,10 @@ def optimize_model(
         if verbose:
             val_res = optimizable_model.evaluate(validation_dataset, verbose=0, return_dict=False)
             t = time.time() - start_time
-            avg_loss = round(epoch_loss_avg.result(), 3)
-            print(f'Epoch: {epoch + 1} - Time: {t}s - Average training loss: {avg_loss}')
-            print(f'Epoch: {epoch + 1} - learning_rate: {optimizable_model.optimizer.learning_rate.numpy()}')
-            print(f'Epoch: {epoch + 1} - Validation loss: {val_res[0]} - Performance on validation set: {val_res[1]}')
+            avg_loss = epoch_loss_avg.result()
+            tf.print(f'Epoch: {epoch + 1} - Time: {t}s - Average training loss: {avg_loss}')
+            tf.print(f'Epoch: {epoch + 1} - learning_rate: {optimizable_model.optimizer.learning_rate.numpy()}')
+            tf.print(f'Epoch: {epoch + 1} - Validation loss: {val_res[0]} - Performance on validation set: {val_res[1]}')

     # Check if model works after pruning
     pruned_performance = optimizable_model.evaluate(validation_dataset, verbose=0, return_dict=False)[-1]
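The switch from print to tf.print matters when logging happens inside TensorFlow-executed code: a plain Python print runs only while a function is being traced, whereas tf.print is a graph op that runs on every invocation and can print live tensor values, which is presumably the motivation for routing the epoch logging through it here. The following is a minimal, self-contained sketch of that difference using generic TensorFlow, not hls4ml code; the names train_step and loss_avg are made up for the example.

# Minimal sketch: print vs tf.print inside a tf.function (not hls4ml code).
import tensorflow as tf

loss_avg = tf.keras.metrics.Mean()

@tf.function
def train_step(value):
    loss_avg.update_state(value)
    print('print: runs only while the function is traced')       # Python side effect, fires once
    tf.print('tf.print: running average =', loss_avg.result())   # graph op, runs on every call
    return loss_avg.result()

for v in [1.0, 2.0, 3.0]:
    train_step(tf.constant(v))
# The print line appears a single time (at tracing); the tf.print line appears on all three calls.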
