|
| 1 | +"""Utilities for logging constant baseline metrics to Weights & Biases.""" |
| 2 | + |
| 3 | +import wandb |
| 4 | + |
| 5 | +import art |
| 6 | + |
| 7 | + |
async def log_constant_metrics_wandb(
    model: art.Model,
    num_steps: int,
    split_metrics: dict[str, dict[str, float]],
) -> None:
    """
    Log constant metrics to W&B as horizontal lines across all training steps.

    Creates a W&B run and logs the same values at every step from 0 to
    `num_steps`, producing horizontal reference lines on charts. Useful for
    comparing training curves against static baselines.

    NOTE(review): declared ``async`` although the body performs no awaits —
    presumably to match the calling convention of sibling APIs; confirm
    before changing.

    Parameters
    ----------
    model : art.Model
        The model whose `project` and `name` are used for the W&B run.
    num_steps : int
        Total training steps. Metrics are logged at steps 0 through `num_steps`.
        A negative value logs nothing (the range is empty).
    split_metrics : dict[str, dict[str, float]]
        Nested dict mapping split names (e.g., "train", "val") to metric dicts.
        Each metric is logged as "{split}/{metric_name}".

        Example: `{"train": {"loss": 0.5}, "val": {"loss": 0.4, "accuracy": 0.8}}`
    """
    run = wandb.init(
        project=model.project,
        name=model.name,
        # "create_new" starts a fresh run even if another run is active
        # in this process.
        reinit="create_new",
    )
    try:
        # Flatten {"train": {"loss": 0.5}} -> {"train/loss": 0.5} so W&B
        # groups charts by split via its "/" namespacing convention.
        prefixed_metrics = {
            f"{split}/{key}": value
            for split, metrics in split_metrics.items()
            for key, value in metrics.items()
        }

        # Log the same values at every step to render horizontal lines.
        for step in range(num_steps + 1):
            run.log(prefixed_metrics, step=step)
    finally:
        # Always close the run, even if logging raises (e.g. a network
        # error), so no unfinished W&B run or background process lingers.
        run.finish()
0 commit comments