"""Loggers for RL training with Ray."""

import time

from acme.utils.loggers import base
import mlflow
import numpy as np


class MLflowLogger(base.Logger):
  """Logs training stats to a local MLflow tracking server."""

  def __init__(self,
               uri: str,
               run_id: str,
               label: str = '',
               time_delta: float = 0.,
               ):
    """Initializes the logger.

    Args:
      uri: Tracking server URI, e.g. 'http://127.0.0.1:8080'.
      run_id: UUID of an existing run to log metrics and parameters under.
        Each process (learner, actors, etc.) has its own logger instance,
        but they all log under the same run.
      label: Label string to use when logging, e.g. 'learner', 'actor',
        'evaluator'.
      time_delta: How often (in seconds) to write values. If zero,
        everything is written.
    """
    # Start logging under the existing run.
    mlflow.set_tracking_uri(uri=uri)
    mlflow.start_run(run_id=run_id)

    self._label = label
    self._time = time.time()
    self._time_delta = time_delta
    self._keys2track = [
        'learner_get_variables_calls', 'learner_steps', 'learner_walltime',
        'episode_length', 'episode_return', 'actor_episodes', 'actor_steps',
    ]
    # Log only a subset of tracked and calculated keys.
    self._keys2log = [
        'walltime_hr', 'acting-to-learning', 'episode_length',
        'steps_per_second_actor', 'steps_per_second_learner',
        'evaluator_episode_return', 'actor_episode_return',
    ]

    # The client is used by the learner to compute and log the average
    # return over all actors.
    self._client = mlflow.tracking.MlflowClient()
    self._run_id = run_id

  def write(self, values: base.LoggingData):
    """Writes data to the MLflow run.

    Args:
      values: Mapping from metric names to values to log.
    """

    # Always log saved_snapshot_at_actor_steps when it occurs.
    if 'saved_snapshot_at_actor_steps' in values:
      step = values['saved_snapshot_at_actor_steps']
      mlflow.log_metric(
          key='saved_snapshot_at_actor_steps',
          value=step,
          step=step)

    now = time.time()
    if (now - self._time) < self._time_delta:
      return

    # Format metrics.
    metrics = {}
    for k, v in values.items():
      if k in self._keys2track:
        formatted = base.to_numpy(v)
        if isinstance(formatted, np.ndarray):
          formatted = formatted.item()
        metrics[k] = formatted

    # Calculate additional metrics. Guard against a missing or zero
    # 'learner_walltime' (e.g. in actor processes) before dividing by it.
    if 'learner_walltime' in metrics and metrics['learner_walltime'] > 0:
      metrics['walltime_hr'] = metrics['learner_walltime'] / 3600.
      if 'learner_steps' in metrics and metrics['learner_steps'] > 0:
        sps = metrics['learner_steps'] / metrics['learner_walltime']
        metrics['steps_per_second_learner'] = sps
      if 'actor_steps' in metrics and metrics['actor_steps'] > 0:
        sps = metrics['actor_steps'] / metrics['learner_walltime']
        metrics['steps_per_second_actor'] = sps
    if ('steps_per_second_actor' in metrics and
        'steps_per_second_learner' in metrics):
      sps_act = metrics['steps_per_second_actor']
      sps_lrn = metrics['steps_per_second_learner']
      metrics['acting-to-learning'] = sps_act / sps_lrn
    # Log evaluator returns under a separate key so they do not skew the
    # average return computed over actors.
    if self._label == 'evaluator' and 'episode_return' in metrics:
      metrics['evaluator_episode_return'] = metrics['episode_return']
    elif 'episode_return' in metrics:
      metrics['actor_episode_return'] = metrics['episode_return']

    # Log the subset of metrics.
    step = int(metrics.get('actor_steps', 0))
    metrics = {k: v for k, v in metrics.items() if k in self._keys2log}
    mlflow.log_metrics(metrics, step=step)
    self._time = now

    # If this logger instance belongs to the learner, also compute and log
    # the average return over all actors.
    if self._label == 'learner':
      history = self._client.get_metric_history(
          run_id=self._run_id, key='actor_episode_return')
      if history:
        x = [entry.step for entry in history]
        y = [entry.value for entry in history]
        x_conv, y_conv = self._convolve(x, y)
        logged_so_far = self._client.get_metric_history(
            run_id=self._run_id, key='average_episode_return')
        idx_from = len(logged_so_far)
        for value, step in zip(y_conv[idx_from:], x_conv[idx_from:]):
          mlflow.log_metric('average_episode_return', value, step=step)

  def _convolve(self, x, y, kernel_size=50):
    """Smooths y with a moving average and trims x to the same length."""
    # Keep only averages taken over full windows; assumes an even
    # kernel_size so the trimmed x and y end up with equal length.
    y_conv = np.convolve(y, np.ones(kernel_size) / kernel_size)
    y_conv = y_conv[kernel_size:-kernel_size]
    x_conv = x[kernel_size // 2:-kernel_size // 2 - 1]
    return x_conv, y_conv

  def close(self):
    """Closes the logger, not expecting any further writes."""
    mlflow.end_run()
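
# Example usage (a minimal sketch; the experiment name, metric values and the
# use of MlflowClient to create the run up front are illustrative assumptions,
# not part of this module). One run is created once, and its run_id is handed
# to every process's logger so that the learner, actors and evaluator all log
# under the same MLflow run:
#
#   client = mlflow.tracking.MlflowClient(tracking_uri='http://127.0.0.1:8080')
#   experiment_id = client.create_experiment('my_rl_experiment')
#   run_id = client.create_run(experiment_id).info.run_id
#
#   learner_logger = MLflowLogger(
#       uri='http://127.0.0.1:8080', run_id=run_id, label='learner')
#   learner_logger.write({'learner_steps': 1000, 'learner_walltime': 120.0})
#   learner_logger.close()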