from collections import defaultdict
from dataclasses import dataclass
from typing import List, Literal, Type

import gradio as gr
import numpy as np
import pandas as pd

from viewer.literals import REFERENCE_RUNS, TASK_CONSISTENCY_BUTTON_CLOSE_LABEL, TASK_CONSISTENCY_BUTTON_LABEL


@dataclass
class PlotOptions:
    smoothing: int
    interpolate: bool
    pct: bool
    merge_seeds: str


@dataclass(frozen=True)
class TaskInfo:
    filename: str
    name: str
    metrics: dict[str, float]
    hashes: dict[str, str]


@dataclass(frozen=True)
class RunInfo:
    name: str
    seed: int
    step: int
    tasks: list[TaskInfo]

    @property
    def full_name(self):
        return f"{self.name}-seed-{self.seed}" if not self.name.endswith("-") else self.name


RunData = list[RunInfo]


def get_run_name_seed(run_name):
    # Runs without an explicit "-seed-" suffix default to seed 42.
    if "-seed-" not in run_name:
        return run_name, 42
    run_name, seed = run_name.split("-seed-")
    return run_name, int(seed)


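# Examples (hypothetical run names):
#   get_run_name_seed("ablation-lr-1e-3-seed-7")  -> ("ablation-lr-1e-3", 7)
#   get_run_name_seed("ablation-lr-1e-3")         -> ("ablation-lr-1e-3", 42)

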
def select_runs(df: pd.DataFrame, runs_to_include: list[str] | None = None, runs_to_exclude: list[str] | None = None):
    conditions = pd.Series(True, index=df.index)

    if runs_to_include:
        parsed = [get_run_name_seed(run) for run in runs_to_include]
        conditions_include = [(df['runname'] == name) & (df['seed'] == seed) for name, seed in parsed]
        conditions = pd.concat(conditions_include, axis=1).any(axis=1)
    if runs_to_exclude:
        parsed = [get_run_name_seed(run) for run in runs_to_exclude]
        conditions_exclude = [(df['runname'] == name) & (df['seed'] == seed) for name, seed in parsed]
        # Combine with any include filter instead of overwriting it.
        conditions &= ~pd.concat(conditions_exclude, axis=1).any(axis=1)

    return df[conditions]


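# Example (hypothetical run names): keep both seeds of "run-a", then drop seed 2,
# leaving only rows where runname == "run-a" and seed == 1:
#   select_runs(df, runs_to_include=["run-a-seed-1", "run-a-seed-2"],
#               runs_to_exclude=["run-a-seed-2"])

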
BASELINE_GROUPING_MODE = Literal["Mean", "Median", "Min", "Max"]


def get_groupped_score(df: pd.DataFrame, runs: list[str], groupping_mode: BASELINE_GROUPING_MODE):
    if len(runs) == 0:
        return pd.DataFrame(columns=df.columns)

    tasks_or_agg = [col for col in df.columns if is_task_column(col) or is_aggregate_column(col)]
    res = select_runs(df, runs_to_include=runs)

    # Aggregate the selected runs per training step.
    agg_fns = {"Mean": "mean", "Median": "median", "Min": "min", "Max": "max"}
    if groupping_mode not in agg_fns:
        raise ValueError(f"Unknown grouping mode: {groupping_mode}")
    return res.groupby("steps")[tasks_or_agg].agg(agg_fns[groupping_mode]).reset_index()


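# Example (hypothetical run names): per-step mean over two baseline seeds.
#   baseline = get_groupped_score(df, ["baseline-seed-1", "baseline-seed-2"], "Mean")

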
def check_task_hash_consistency(run_data: RunData, check_task_consistency_button):
    if not run_data or check_task_consistency_button == TASK_CONSISTENCY_BUTTON_CLOSE_LABEL:
        return gr.update(value={}, visible=False), gr.update(value=TASK_CONSISTENCY_BUTTON_LABEL)

    hash_keys = ["hash_examples", "hash_full_prompts"]
    task_hashes = defaultdict(lambda: defaultdict(list))

    # Group runs by the hash values they report for each task.
    for run in run_data:
        for task_info in run.tasks:
            hashes = task_info.hashes
            hash_values = tuple(hashes.get(k) for k in hash_keys)
            task_hashes[task_info.name][hash_values].append({
                "name": run.name,
                "step": run.step,
                "filename": task_info.filename
            })

    # A task is inconsistent if its runs disagree on any of the hashes.
    conflicts = {}
    for task, hash_groups in task_hashes.items():
        if len(hash_groups) > 1:
            conflicts[task] = [
                {
                    "runs": runs,
                    "hashes": dict(zip(hash_keys, hash_values))
                }
                for hash_values, runs in hash_groups.items()
            ]

    return gr.update(value={"conflicts": conflicts}, visible=True), gr.update(value=TASK_CONSISTENCY_BUTTON_CLOSE_LABEL)


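# Illustrative shape of the JSON payload when runs disagree on a task:
#   {"conflicts": {"<task>": [{"runs": [...],
#                              "hashes": {"hash_examples": "...",
#                                         "hash_full_prompts": "..."}}, ...]}}

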
def create_df_from_run_data(run_data: RunData):
    # One row per run checkpoint; one column per task/metric pair.
    df = pd.DataFrame([
        {
            "runname": run.name,
            "seed": run.seed,
            "steps": run.step,
            "agg_score_micro": 0,
            **{
                f"{task_info.name}/{metric}": value
                for task_info in run.tasks
                for metric, value in task_info.metrics.items()
            }
        } for run in run_data
    ])
    # Metrics missing for some runs become 0 rather than NaN.
    df = df.fillna(0)
    return df


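# Illustrative result: a run "demo" (seed 1, step 100) whose "mmlu" task reports
# {"acc": 0.3} yields one row with columns:
#   runname="demo", seed=1, steps=100, agg_score_micro=0, "mmlu/acc"=0.3

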
def is_task_column(column: str):
    return "/" in column


def is_aggregate_column(column: str):
    return column.startswith("agg_score")


def is_baseline_run(run: str):
    return any(run.startswith(prefix) for prefix in ["random", "dummy", "baseline"])


def is_reference_run(run: str):
    return any(ref_run + "-" in run for ref_run in REFERENCE_RUNS)


def z_score_normalize(df: pd.DataFrame, normalization_runs: List[str], columns: List[str], variability_window: int = 1) -> pd.DataFrame:
    # A standard deviation needs at least two normalization runs.
    if len(normalization_runs) <= 1:
        return df

    normalization_df = select_runs(df, runs_to_include=normalization_runs)

    grouped = normalization_df.groupby('steps')[columns]
    means = grouped.mean()
    stds = grouped.std()

    # Avoid division by zero for columns that are constant across runs.
    stds = stds.replace(0, 1)

    # Center on the means at the last step; scale by the std averaged over the
    # last `variability_window` steps.
    last_means = means.loc[means.index.max()]
    last_window_stds = stds.sort_index(ascending=False).head(variability_window).mean()

    df[columns] = (df[columns].sub(last_means[columns], axis=1)
                              .div(last_window_stds[columns], axis=1))

    return df


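# In formula form, for each normalized column c:
#   z(c) = (c - mean_c(last step)) / mean(std_c over the last `variability_window` steps)

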
def rescale_scores(df: pd.DataFrame, normalization_runs: List[str], columns: List[str]) -> pd.DataFrame:
    baseline = get_groupped_score(df, normalization_runs, "Mean")

    # Fill in baseline values for steps the normalization runs did not log.
    baseline = baseline.set_index("steps").reindex(df["steps"].unique()).interpolate().reset_index()

    # Skip columns whose baseline is degenerate: all <= 0 (nothing to rescale
    # against) or all == 1 (would divide by zero below).
    rescaled_cols = baseline.columns[~((baseline <= 0.0).all() | (baseline == 1.0).all())]
    rescaled_cols = rescaled_cols[(rescaled_cols != 'steps') & rescaled_cols.isin(columns)]

    df_with_baseline = df.merge(baseline[list(rescaled_cols) + ['steps']], on=["steps"], how="left", suffixes=("", "_baseline")).fillna(0)
    # Rescale so the baseline score maps to 0 while a perfect score stays at 1:
    # rescaled = (score - baseline) / (1 - baseline).
    df[rescaled_cols] = df[rescaled_cols].sub(df_with_baseline[rescaled_cols + '_baseline'].values)
    df[rescaled_cols] = df[rescaled_cols].div(1 - df_with_baseline[rescaled_cols + '_baseline'].values)
    return df
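

if __name__ == "__main__":
    # Minimal smoke test on hypothetical data (run names, tasks, and scores are
    # made up; gradio and viewer.literals must still be importable).
    runs = [
        RunInfo("demo", seed, step, [
            TaskInfo("results.json", "mmlu", {"acc": 0.25 + 0.01 * seed + 0.001 * step}, {})
        ])
        for seed in (1, 2)
        for step in (100, 200)
    ]
    frame = create_df_from_run_data(runs)
    print(select_runs(frame, runs_to_include=["demo-seed-1"]))
    print(rescale_scores(frame.copy(), ["demo-seed-1", "demo-seed-2"], ["mmlu/acc"]))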