Each record has four string fields:

instruction: string, 100 distinct values; the docstring-writing request
code: string, 78 to 193k characters; the original Python source
response: string, 259 to 170k characters; a unified diff that adds the requested docstrings
file: string, 59 to 203 characters; the raw GitHub URL of the source file
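The rows that follow list these four fields in order for each record. As a minimal sketch of how records with this schema might be consumed, the snippet below assumes the data has been exported to a JSONL file with one JSON object per line; the filename and helper name are illustrative, not part of the dataset.

import json
from typing import Iterator

def iter_records(path: str = "docstring_pairs.jsonl") -> Iterator[dict]:
    """Yield one record per line from a hypothetical JSONL export of the dataset."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            # Each record carries the request, the source code, the diff, and the file URL.
            assert {"instruction", "code", "response", "file"} <= record.keys()
            yield record

if __name__ == "__main__":
    for record in iter_records():
        print(record["instruction"])
        print(record["file"])
        # record["response"] is a unified diff against record["code"].
        break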
Document functions with detailed explanations
# -*- coding: utf-8 -*- from typing import ( Any, Literal, AsyncGenerator, Generator, TYPE_CHECKING, ) from ._tts_base import TTSModelBase from ._tts_response import TTSResponse from ..message import Msg, AudioBlock, Base64Source from ..types import JSONSerializableObject if TYPE_CHECKING: fro...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""DashScope SDK TTS model implementation using MultiModalConversation API.""" from typing import ( Any, Literal, @@ -25,6 +26,10 @@ class DashScopeTTSModel(TTSModelBase): + """DashScope TTS model implementation using MultiModalConversation API. + Fo...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_dashscope_tts_model.py
Add standardized docstrings across the file
# -*- coding: utf-8 -*- from abc import ABC, abstractmethod from typing import Any, AsyncGenerator from agentscope.message import Msg from ._tts_response import TTSResponse class TTSModelBase(ABC): supports_streaming_input: bool = False """If the TTS model class supports streaming input.""" model_nam...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The TTS model base class.""" from abc import ABC, abstractmethod from typing import Any, AsyncGenerator @@ -9,6 +10,23 @@ class TTSModelBase(ABC): + """Base class for TTS models in AgentScope. + + This base class provides general abstraction for both rea...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_tts_base.py
Turn comments into proper docstrings
# -*- coding: utf-8 -*- import json from collections import OrderedDict from dataclasses import dataclass from typing import Callable, Any, Optional from ..types import JSONSerializableObject @dataclass class _JSONSerializeFunction: to_json: Optional[Callable[[Any], Any]] = None """The function converting t...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The state module in agentscope.""" import json from collections import OrderedDict @@ -17,12 +18,16 @@ class StateModule: + """The state module class in agentscope to support nested state + serialization and deserialization.""" def __init__(self) ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/module/_state_module.py
Generate consistent docstrings
# -*- coding: utf-8 -*- import base64 from typing import TYPE_CHECKING, Any, Literal, AsyncGenerator from ._tts_base import TTSModelBase from ._tts_response import TTSResponse from ..message import Msg, AudioBlock, Base64Source from ..types import JSONSerializableObject if TYPE_CHECKING: from openai import HttpxB...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""OpenAI TTS model implementation.""" import base64 from typing import TYPE_CHECKING, Any, Literal, AsyncGenerator @@ -14,6 +15,10 @@ class OpenAITTSModel(TTSModelBase): + """OpenAI TTS model implementation. + For more details, please see the `official doc...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_openai_tts_model.py
Can you add docstrings to this Python file?
# -*- coding: utf-8 -*- import base64 from typing import TYPE_CHECKING, Any, Literal, AsyncGenerator, Iterator from ._tts_base import TTSModelBase from ._tts_response import TTSResponse from ..message import Msg, AudioBlock, Base64Source from ..types import JSONSerializableObject if TYPE_CHECKING: from google.gen...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Gemini TTS model implementation.""" import base64 from typing import TYPE_CHECKING, Any, Literal, AsyncGenerator, Iterator @@ -16,6 +17,10 @@ class GeminiTTSModel(TTSModelBase): + """Gemini TTS model implementation. + For more details, please see the `of...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_gemini_tts_model.py
Add return value explanations in docstrings
# -*- coding: utf-8 -*- import base64 import threading from typing import TYPE_CHECKING, AsyncGenerator, Any from ._tts_response import TTSResponse from .._logging import logger from ..message import AudioBlock, Base64Source if TYPE_CHECKING: from dashscope.audio.tts_v2 import ResultCallback else: ResultCall...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Utility classes for DashScope CosyVoice TTS models.""" import base64 import threading @@ -15,11 +16,23 @@ def _get_cosyvoice_callback_class() -> type["ResultCallback"]: + """Get the callback class for CosyVoice TTS streaming audio output. + + This callba...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_utils.py
Add docstrings including usage examples
# -*- coding: utf-8 -*- from pydantic import BaseModel, Field class AlgorithmConfig(BaseModel): algorithm_type: str = Field( description=( "The tuning algorithm type " "e.g., 'multi_step_grpo', 'sft'." "Please refer to https://github.com/agentscope-ai/Trinity-RFT" ...
--- +++ @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- +"""AlgorithmConfig definition for tuner.""" from pydantic import BaseModel, Field class AlgorithmConfig(BaseModel): + """Algorithm configuration for tuning.""" algorithm_type: str = Field( description=( @@ -37,4 +39,4 @@ eval_interval_step...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tuner/_algorithm.py
Add detailed documentation for each class
# -*- coding: utf-8 -*- from typing import Any, Callable, List, Tuple from datetime import datetime import inspect from ._workflow import WorkflowType from ._judge import JudgeType from ._model import TunerModelConfig from ._dataset import DatasetConfig from ._algorithm import AlgorithmConfig def _set_if_not_none(ob...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Configuration conversion for tuner.""" from typing import Any, Callable, List, Tuple from datetime import datetime import inspect @@ -11,6 +12,7 @@ def _set_if_not_none(obj: Any, field: str, value: Any) -> None: + """Set the field of obj to value if value is...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tuner/_config.py
Create Google-style docstrings for my code
# -*- coding: utf-8 -*- from itertools import islice from typing import List from pydantic import BaseModel, Field class DatasetConfig(BaseModel): path: str = Field( description="Path to your dataset.", ) name: str | None = Field( description="The name of the dataset configuration.", ...
--- +++ @@ -1,10 +1,16 @@ # -*- coding: utf-8 -*- +"""DatasetConfig definition for tuner.""" from itertools import islice from typing import List from pydantic import BaseModel, Field class DatasetConfig(BaseModel): + """Dataset configuration for tuning. + Compatible with huggingface dataset format. + ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tuner/_dataset.py
Add docstrings that explain logic
# -*- coding: utf-8 -*- # pylint: disable=protected-access import base64 import hashlib import json from typing import Any, Literal from ._reader_base import ReaderBase from ._text_reader import TextReader from ._utils import _get_media_type_from_data from .._document import Document, DocMetadata from ...message impor...
--- +++ @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # pylint: disable=protected-access +"""The Excel reader to read and chunk Excel files.""" import base64 import hashlib import json @@ -14,6 +15,17 @@ def _get_excel_column_name(col_index: int) -> str: + """Convert a 0-based column index to Excel column name (A,...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/rag/_reader/_excel_reader.py
Generate consistent docstrings
# -*- coding: utf-8 -*- from typing import Any from ._reme_long_term_memory_base import ReMeLongTermMemoryBase from ...._logging import logger from ....message import Msg, TextBlock from ....tool import ToolResponse class ReMeToolLongTermMemory(ReMeLongTermMemoryBase): async def record_to_memory( self, ...
--- +++ @@ -1,4 +1,11 @@ # -*- coding: utf-8 -*- +"""Tool memory implementation using ReMe library. + +This module provides a tool memory implementation that integrates +with the ReMe library to record tool execution results and retrieve +tool usage guidelines. + +""" from typing import Any from ._reme_long_term_me...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/memory/_long_term_memory/_reme/_reme_tool_long_term_memory.py
Auto-generate documentation strings for this file
# -*- coding: utf-8 -*- from abc import abstractmethod from typing import Any from .._document import Document class ReaderBase: @abstractmethod async def __call__(self, *args: Any, **kwargs: Any) -> list[Document]: @abstractmethod def get_doc_id(self, *args: Any, **kwargs: Any) -> str:
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The reader base class for retrieval-augmented generation (RAG).""" from abc import abstractmethod from typing import Any @@ -6,9 +7,21 @@ class ReaderBase: + """The reader base class, which is responsible for reading the original + data, splitting it int...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/rag/_reader/_reader_base.py
Add inline docstrings for readability
# -*- coding: utf-8 -*- import json from typing import Any, Literal, TYPE_CHECKING from .._reader import Document from ._store_base import VDBStoreBase from .._document import DocMetadata from ..._utils._common import _map_text_to_uuid from ...types import Embedding if TYPE_CHECKING: from qdrant_client import Asy...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The Qdrant local vector store implementation.""" import json from typing import Any, Literal, TYPE_CHECKING @@ -15,6 +16,13 @@ class QdrantStore(VDBStoreBase): + """The Qdrant vector store implementation, supporting both local and + remote Qdrant instanc...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/rag/_store/_qdrant_store.py
Help me write clear docstrings
# -*- coding: utf-8 -*- from abc import abstractmethod from typing import Any from .. import Document from ...types import Embedding class VDBStoreBase: @abstractmethod async def add(self, documents: list[Document], **kwargs: Any) -> None: @abstractmethod async def delete(self, *args: Any, **kwargs...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The embedding store base class.""" from abc import abstractmethod from typing import Any @@ -7,12 +8,16 @@ class VDBStoreBase: + """The vector database store base class, serving as a middle layer between + the knowledge base and the actual vector databas...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/rag/_store/_store_base.py
Write documentation strings for class attributes
# -*- coding: utf-8 -*- from enum import Enum from typing import Literal from pydantic import BaseModel from ._utils import AudioFormat from ._model_event import ModelEvents from ...message import ToolUseBlock, ToolResultBlock class ServerEventType(str, Enum): # Session lifecycle SERVER_SESSION_CREATED = "...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The websocket events generated from the realtime agent and backend.""" from enum import Enum from typing import Literal @@ -10,6 +11,7 @@ class ServerEventType(str, Enum): + """Types of agent events for backend-to-web communication.""" # Session life...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/realtime/_events/_server_event.py
Write docstrings for this repository
# -*- coding: utf-8 -*- from enum import Enum from typing import Literal from pydantic import BaseModel from ._utils import AudioFormat from ...message import ToolUseBlock class ModelEventType(str, Enum): # API session lifecycle MODEL_SESSION_CREATED = "model_session_created" """The realtime API sessio...
--- +++ @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +"""The unified event from realtime model APIs in AgentScope, which will be +consumed by the realtime agents.""" from enum import Enum from typing import Literal @@ -9,6 +11,7 @@ class ModelEventType(str, Enum): + """Types of model events from the API.""" ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/realtime/_events/_model_event.py
Document all public functions with docstrings
import base64 import datetime import os from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence if TYPE_CHECKING: from dataclasses import dataclass else: from pydantic.dataclasses import dataclass @dataclass class CompletionFnSpec: cls: str args: Optional[Dict[str, A...
--- +++ @@ -1,3 +1,7 @@+""" +This file defines the base specifications for models, evals, and runs. Running +evals and most development work should not require familiarity with this file. +""" import base64 import datetime import os @@ -12,6 +16,9 @@ @dataclass class CompletionFnSpec: + """ + Specification ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/base.py
Generate descriptive docstrings automatically
import os from ast import literal_eval from typing import Any, Optional, Union import numpy as np import pandas as pd from openai import OpenAI from evals.api import CompletionFn, CompletionResult from evals.prompt.base import ChatCompletionPrompt, CompletionPrompt from evals.record import record_sampling from evals....
--- +++ @@ -1,3 +1,6 @@+""" +Extending Completion Functions with Embeddings-based retrieval from a fetched dataset +""" import os from ast import literal_eval from typing import Any, Optional, Union @@ -48,6 +51,9 @@ class RetrievalCompletionFn(CompletionFn): + """ + This Completion Function uses embedding...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/completion_fns/retrieval.py
Add concise docstrings to each method
import logging from typing import Any, Optional, Union import openai from openai import OpenAI from evals.api import CompletionFn, CompletionResult from evals.base import CompletionFnSpec from evals.prompt.base import ( ChatCompletionPrompt, CompletionPrompt, OpenAICreateChatPrompt, OpenAICreatePrompt...
--- +++ @@ -25,6 +25,10 @@ def openai_completion_create_retrying(client: OpenAI, *args, **kwargs): + """ + Helper function for creating a completion. + `args` and `kwargs` match what is accepted by `openai.Completion.create`. + """ result = create_retrying( client.completions.create, retry...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/completion_fns/openai.py
Add docstrings to improve readability
import os from dataclasses import dataclass from pathlib import Path import evals @dataclass class DistractorSample: question: str ideal: str VARIANTS = { "which-is-heavier", "ambiguous-sentences", "first-letters", "reverse-sort-words-eng", "distractorless", } def proc_which_is_heavie...
--- +++ @@ -70,10 +70,17 @@ def get_basic_distractor_example() -> DistractorSample: + """ + An arbitrary distractor example used in the task description for the + distractorless variant + """ return DistractorSample(question="What is the capital of Italy?", ideal="rome") def get_distractors(va...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/already_said_that/distractors.py
Write reusable docstrings
import csv import dataclasses import gzip import itertools import json import logging import os import urllib from collections.abc import Iterator from functools import partial from pathlib import Path from typing import Any, BinaryIO, List, Optional, Sequence, Text, Union import blobfile as bf import lz4.frame import...
--- +++ @@ -1,3 +1,6 @@+""" +This file defines utilities for working with data and files of various types. +""" import csv import dataclasses import gzip @@ -20,6 +23,7 @@ def gzip_open(filename: str, mode: str = "rb", openhook: Any = open) -> gzip.GzipFile: + """Wrap the given openhook in gzip.""" if mo...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/data.py
Improve my code by adding docstrings
# -*- coding: utf-8 -*- from abc import abstractmethod from ..module import StateModule class SessionBase: @abstractmethod async def save_session_state( self, session_id: str, user_id: str = "", **state_modules_mapping: StateModule, ) -> None: @abstractmethod asy...
--- +++ @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- +"""The session base class in agentscope.""" from abc import abstractmethod from ..module import StateModule class SessionBase: + """The base class for session in agentscope.""" @abstractmethod async def save_session_state( @@ -13,6 +15,16 @@ ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/session/_session_base.py
Write documentation strings for class attributes
import random import re from typing import Any, Optional from evals.elsuite.already_said_that.distractors import DistractorSample from evals.task_state import Message def build_message( words_not_shown: set[str], words_prev_shown: set[str], distracting_words: set[str], rng: random.Random, distrac...
--- +++ @@ -13,6 +13,27 @@ rng: random.Random, distractor_sample: Optional[DistractorSample] = None, ) -> tuple[Message, list[str], Optional[DistractorSample]]: + """ + Builds the TaskState.Message for a given sample. + Randomly chooses whether to show a word (base task) or ask a question (distractor...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/already_said_that/utils.py
Write beginner-friendly docstrings
# -*- coding: utf-8 -*- import json from typing import Any, TYPE_CHECKING from ._session_base import SessionBase from .._logging import logger from ..module import StateModule if TYPE_CHECKING: from redis.asyncio import ConnectionPool, Redis else: ConnectionPool = Any Redis = Any class RedisSession(Sess...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The Redis session class.""" import json from typing import Any, TYPE_CHECKING @@ -14,6 +15,7 @@ class RedisSession(SessionBase): + """The Redis session class.""" SESSION_KEY = "user_id:{user_id}:session:{session_id}:state" """Redis key pattern (...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/session/_redis_session.py
Create docstrings for all classes and functions
# -*- coding: utf-8 -*- import datetime import enum import inspect import json from dataclasses import is_dataclass from typing import Any from pydantic import BaseModel from ..message import Msg def _to_serializable( obj: Any, ) -> Any: # Handle primitive types first if isinstance(obj, (str, int, bool...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Serialize objects to JSON string.""" import datetime import enum import inspect @@ -14,6 +15,16 @@ def _to_serializable( obj: Any, ) -> Any: + """Convert an object to a JSON serializable type. + + Args: + obj (`Any`): + The object to b...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tracing/_utils.py
Provide clean and structured docstrings
# -*- coding: utf-8 -*- # flake8: noqa: E501 # pylint: disable=line-too-long import os from ._write_text_file import _view_text_file from .._response import ToolResponse from ...exception import ToolInvalidArgumentsError from ...message import TextBlock async def view_text_file( file_path: str, ranges: list[...
--- +++ @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # flake8: noqa: E501 # pylint: disable=line-too-long +"""The view text file tool in agentscope.""" import os from ._write_text_file import _view_text_file @@ -13,6 +14,18 @@ file_path: str, ranges: list[int] | None = None, ) -> ToolResponse: + """View t...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tool/_text_file/_view_text_file.py
Write documentation strings for class attributes
# -*- coding: utf-8 -*- import inspect from typing import Any, Dict, Tuple, TYPE_CHECKING from .. import _config from ..embedding import EmbeddingModelBase from ..message import Msg, ToolUseBlock from ..model import ChatModelBase from ._attributes import ( SpanAttributes, OperationNameValues, ProviderName...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Extract attributes from AgentScope components for OpenTelemetry tracing.""" import inspect from typing import Any, Dict, Tuple, TYPE_CHECKING @@ -49,6 +50,12 @@ def _get_common_attributes() -> Dict[str, str]: + """Get common attributes for all spans. + + ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tracing/_extractor.py
Add standardized docstrings across the file
# -*- coding: utf-8 -*- from ...exception import ToolInvalidArgumentsError def _calculate_view_ranges( old_n_lines: int, new_n_lines: int, start: int, end: int, extra_view_n_lines: int = 5, ) -> tuple[int, int]: view_start = max(1, start - extra_view_n_lines) delta_lines = new_n_lines - ...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The utility functions for text file tools in agentscope.""" from ...exception import ToolInvalidArgumentsError @@ -9,6 +10,20 @@ end: int, extra_view_n_lines: int = 5, ) -> tuple[int, int]: + """Calculate after writing the new content, the view range...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tool/_text_file/_utils.py
Create simple docstrings for beginners
# -*- coding: utf-8 -*- from typing import Any, Dict from ..message import ContentBlock from ._utils import _serialize_to_str def _convert_media_block( source: Dict[str, Any], modality: str, ) -> Dict[str, Any] | None: source_type = source.get("type") if source_type == "url": url = source....
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""Convert ContentBlock to OpenTelemetry GenAI part format.""" from typing import Any, Dict @@ -11,6 +12,18 @@ source: Dict[str, Any], modality: str, ) -> Dict[str, Any] | None: + """Convert media block (image/audio/video) to OpenTelemetry format. + + ...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tracing/_converter.py
Generate docstrings for this script
import ast import logging from typing import Sequence logger = logging.getLogger(__name__) def calculate_accuracy(tp: int, fp: int, tn: int, fn: int): accuracy = (tp + tn) / (tp + tn + fp + fn) return accuracy def calculate_precision(tp: int, fp: int): if tp + fp == 0: return 0 precision =...
--- +++ @@ -35,6 +35,10 @@ def precision_recall_fscore(metrics: Sequence[dict]): + """ + Calculates prediction metrics, where positive class is a tool being bugged. Handles edge cases + where solver never predicted a certain class + """ def tool_is_buggy(metric): return len(metric["bugs"...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bugged_tools/utils.py
Auto-generate documentation strings for this file
from functools import total_ordering from itertools import combinations from typing import Literal, Union BluffMove = Union["PokerHand", Literal["bluff"]] CARDS = "89TJQKA" class PlayerCards: def __init__(self, cards: list[str]): assert len(cards) == 5 self.cards = {} for suit in "shdc...
--- +++ @@ -1,3 +1,4 @@+"""All the card-related logic is in this file (both player cards and poker hands)""" from functools import total_ordering from itertools import combinations @@ -10,6 +11,7 @@ class PlayerCards: def __init__(self, cards: list[str]): + """In: e.g. [As, Ah, Kh, Qd, 9c]""" ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bluff/bluff/cards.py
Create docstrings for reusable components
# -*- coding: utf-8 -*- import os from ._workflow import WorkflowType from ._judge import JudgeType from ._model import TunerModelConfig from ._dataset import DatasetConfig from ._config import ( _to_trinity_config, check_judge_function, check_workflow_function, ) from ._algorithm import AlgorithmConfig d...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The main entry point for agent learning.""" import os from ._workflow import WorkflowType from ._judge import JudgeType @@ -26,6 +27,37 @@ monitor_type: str | None = None, config_path: str | None = None, ) -> None: + """Train the agent workflow with th...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tuner/_tune.py
Add professional docstrings to my codebase
import logging from abc import ABC, abstractmethod from typing import Any, Callable, Optional, Protocol, Union, runtime_checkable from evals.prompt.base import OpenAICreateChatPrompt, OpenAICreatePrompt, Prompt from evals.record import record_match logger = logging.getLogger(__name__) class CompletionResult(ABC): ...
--- +++ @@ -1,3 +1,7 @@+""" +This file provides common interfaces and utilities used by eval creators to +sample from models and process the results. +""" import logging from abc import ABC, abstractmethod @@ -22,6 +26,18 @@ prompt: Union[str, OpenAICreateChatPrompt], **kwargs, ) -> Completion...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/api.py
Add missing documentation to my Python functions
import argparse import os from pathlib import Path from typing import Optional import numpy as np import pandas as pd from matplotlib import pyplot as plt from evals.utils import log_utils # MODEL_NAMES = { # "error_recovery/gpt-4-0613": "GPT-4", # "generation/hhh/gpt-4-base": "GPT-4 Base", # "error_reco...
--- +++ @@ -139,6 +139,7 @@ def annotate_axes(ax, errors: Optional[pd.DataFrame]): + """Annotate each bar in the plot with its value""" ABOVE_OFFSET = 0.01 BELOW_OFFSET = 0.1 if errors is not None: @@ -355,6 +356,11 @@ def plot_accuracy_by_steps_all(metrics_df, results_df, out_dir): + """ +...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/error_recovery/scripts/make_plots.py
Create docstrings for each class method
import copy import re from importlib import import_module from typing import Optional from evals.elsuite.bluff.bluff.cards import get_bluff_move from evals.solvers.memory import PersistentMemoryCache from evals.solvers.solver import Solver, SolverResult from evals.task_state import Message, TaskState class BluffStra...
--- +++ @@ -30,6 +30,10 @@ self.interaction_cache = PersistentMemoryCache(interaction_length=1) def _generate_response(self, task_state: TaskState): + """ + Calls base solver. Modifies taks state to remove all non-reasoning messages + from assistant + """ task_state =...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bluff/strategy_solver.py
Write beginner-friendly docstrings
# -*- coding: utf-8 -*- from __future__ import annotations from typing import Dict, Any from pydantic import BaseModel, Field class TunerModelConfig(BaseModel): model_path: str = Field( description="The path to the model checkpoint.", ) max_model_len: int = Field( description=( ...
--- +++ @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- +"""TunerModelConfig definition.""" from __future__ import annotations from typing import Dict, Any from pydantic import BaseModel, Field class TunerModelConfig(BaseModel): + """Model configuration for tuning.""" model_path: str = Field( descr...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tuner/_model.py
Write reusable docstrings
import logging import random import re from typing import Any, Sequence, Union import evals.metrics from evals.api import CompletionFn from evals.elsuite.bugged_tools.bugged_tools import ALL_BUGS from evals.elsuite.bugged_tools.task_description import ( ADVANCED_BUG_DESCRIPTION, DEFAULT_REMINDER_MESSAGE, D...
--- +++ @@ -150,6 +150,9 @@ return results def _log_additional_metrics(self, metrics: Sequence[Event], results: dict): + """ + Modifies results in-place, breaks results down per tool and per bug + """ all_tools = list(set([j for i in metrics for j in i["tools"]])) a...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bugged_tools/eval.py
Write docstrings for algorithm functions
# -*- coding: utf-8 -*- import threading from typing import Any, Literal, TYPE_CHECKING, AsyncGenerator from ._tts_base import TTSModelBase from ._tts_response import TTSResponse from ..message import Msg, AudioBlock, Base64Source from ..types import JSONSerializableObject if TYPE_CHECKING: from dashscope.audio....
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""DashScope Realtime TTS model implementation.""" import threading from typing import Any, Literal, TYPE_CHECKING, AsyncGenerator @@ -24,8 +25,10 @@ from dashscope.audio.qwen_tts_realtime import QwenTtsRealtimeCallback class _DashScopeRealtimeTTSCallback(...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/tts/_dashscope_realtime_tts_model.py
Add docstrings to existing functions
import argparse import logging import shlex import sys from typing import Any, Mapping, Optional, Union, cast import evals import evals.api import evals.base import evals.record from evals.eval import Eval from evals.record import RecorderBase from evals.registry import Registry logger = logging.getLogger(__name__) ...
--- +++ @@ -1,3 +1,6 @@+""" +This file defines the `oaieval` CLI for running evals. +""" import argparse import logging import shlex @@ -133,6 +136,7 @@ def parse_extra_eval_params( param_str: Optional[str], ) -> Mapping[str, Union[str, int, float]]: + """Parse a string of the form "key1=val...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/cli/oaieval.py
Annotate my code with docstrings
# -*- coding: utf-8 -*- import os from typing import Any from agentscope.token._token_base import TokenCounterBase class HuggingFaceTokenCounter(TokenCounterBase): def __init__( self, pretrained_model_name_or_path: str, use_mirror: bool = False, use_fast: bool = False, tr...
--- +++ @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +"""The huggingface token counter class.""" import os from typing import Any @@ -6,6 +7,7 @@ class HuggingFaceTokenCounter(TokenCounterBase): + """The token counter for Huggingface models.""" def __init__( self, @@ -15,6 +17,23 @@ trust_...
https://raw.githubusercontent.com/agentscope-ai/agentscope/HEAD/src/agentscope/token/_huggingface_token_counter.py
Auto-generate documentation strings for this file
import copy import random from dataclasses import dataclass from typing import Any, List, Literal, Optional, Sequence import evals import evals.metrics import evals.record from evals.api import CompletionFn from evals.elsuite.error_recovery.defaults import ( DEFAULT_FINAL_ANSWER_MESSAGE, DEFAULT_MISTAKE_MESSAG...
--- +++ @@ -45,6 +45,25 @@ *args, **kwargs, ): + """Evaluate a solver on the error recovery task. + + Args: + completion_fns: The completion functions to evaluate. (should be a single solver) + samples_jsonl: The relative path to the samples jsonl file in evals/...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/error_recovery/eval.py
Generate descriptive docstrings automatically
import math import random import string from typing import Callable, Union import numpy as np from evals.elsuite.bugged_tools.tools import ToolResult, ToolTaskState def explicit_error(tool_task_state: ToolTaskState, non_bugged_func: Callable) -> ToolResult: correct_out = non_bugged_func(tool_task_state) if ...
--- +++ @@ -20,6 +20,9 @@ def offset_numeric(correct_output: Union[int, float], offset: Union[int, float]) -> ToolResult: + """ + Returns +-offset the correct output + """ offset = random.choice([-offset, offset]) out = correct_output + offset # Cast to type of correct output return ToolRes...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bugged_tools/bugged_tools.py
Help me write clear docstrings
import argparse import copy import os import random from typing import Optional, Sequence from stockfish import Stockfish from tqdm import tqdm from evals.elsuite.cant_do_that_anymore.chess.board import BoardController from evals.elsuite.cant_do_that_anymore.chess.move_variants import DIAGONAL_MOVES from evals.elsuit...
--- +++ @@ -40,6 +40,11 @@ def get_stockfish_move(stockfish: Stockfish, num_moves_to_consider: int) -> str: + """ + Gets the next move predicted by stockfish. Gets top n predictions and + selects randomly weighted by each move's centipawn value + Filters out bishop promotions, since our variant shouldn'...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/cant_do_that_anymore/scripts/diagonal_dataset_creation.py
Write Python docstrings for this snippet
import logging import math from collections import Counter from pathlib import Path import numpy as np from scipy.stats import entropy from evals.data import get_jsonl from evals.elsuite.function_deduction.eval import CurrentState, Sample from evals.registry import Registry from evals.solvers.solver import Solver, So...
--- +++ @@ -14,6 +14,17 @@ class AverageBaseline(Solver): + """ + For given test inputs (x, y, z): + * Ask about values of (x-1, x+1, y-1, y+1, z-1, z+1) + * Make three guesses: round/floor/ceil of average values for neighboring numbers + If didn't succeed in 9 rounds (6x ask 3x guess) - surrender. +...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/function_deduction/baselines.py
Add minimal docstrings for each function
from functools import cache from pathlib import Path import numpy as np from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") su...
--- +++ @@ -7,6 +7,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/ant/scripts/grade.py
Write docstrings for algorithm functions
from functools import cache from pathlib import Path from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") submission_file = subm...
--- +++ @@ -5,6 +5,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/bipedal_walker/scripts/grade.py
Document all public functions with docstrings
import logging import shutil from functools import cache from pathlib import Path from tempfile import TemporaryDirectory import pandas as pd from torchvision import datasets import evals.elsuite.hr_ml_agent_bench.benchmarks.cifar10.env.train as baseline_script from evals.elsuite.hr_ml_agent_bench.low_level_actions i...
--- +++ @@ -38,12 +38,22 @@ def get_human_baseline_score() -> float: + """ + Returns the accuracy of the best attempt by a human on the CIFAR-10 + Kaggle leaderboard: https://www.kaggle.com/c/cifar-10/leaderboard. + + Last updated: 2024-01-18. + """ return 95.530 @cache def get_naive_bas...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/cifar10/scripts/grade.py
Provide docstrings following PEP 257
from functools import cache from pathlib import Path from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") submission_file = subm...
--- +++ @@ -5,6 +5,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/cartpole/scripts/grade.py
Add docstrings for better understanding
import argparse from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd from evals.utils import log_utils def main(): parser = argparse.ArgumentParser() parser.add_argument("--log_dir", type=str, required=True) parser.add_argument("--out_dir", type=str, required=T...
--- +++ @@ -1,3 +1,4 @@+"""Take results from recent experiments and make a bar plot""" import argparse from pathlib import Path @@ -70,6 +71,7 @@ def parse_spec(spec: dict) -> tuple[str, str, int]: + """parse the spec from a bp run""" voter_model = spec["completion_fns"][0] influencer_model = spec[...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/ballots/scripts/make_plots.py
Add docstrings to make code maintainable
from pathlib import Path import numpy as np import pandas as pd from evals.elsuite.hr_ml_agent_bench.benchmarks.feedback.env.train import ( DIMENSIONS, compute_metrics_for_regression, ) def get_score(submission_dir: Path) -> float: submission_path = submission_dir / "submission.csv" if not submissi...
--- +++ @@ -45,15 +45,33 @@ def get_human_baseline_score() -> float: + """ + Returns the "Multi-class logarithmic loss" of the best attempt by a human + on the Feedback Prize Kaggle leaderboard: + https://www.kaggle.com/competitions/feedback-prize-effectiveness/leaderboard. + + Last updated: 2024-01-...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/feedback/scripts/grade.py
Improve my code by adding docstrings
from functools import cache from pathlib import Path import numpy as np from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") su...
--- +++ @@ -7,6 +7,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/humanoid/scripts/grade.py
Add docstrings to improve readability
from pathlib import Path import numpy as np import pandas as pd from sklearn.metrics import root_mean_squared_error def get_score(submission_dir: Path) -> float: submission_path = submission_dir / "submission.csv" if not submission_path.exists(): # no submission return get_naive_baseline_score() ...
--- +++ @@ -6,6 +6,7 @@ def get_score(submission_dir: Path) -> float: + """Returns the score of the submission in the given directory.""" submission_path = submission_dir / "submission.csv" @@ -45,15 +46,33 @@ def get_human_baseline_score() -> float: + """ + Returns the RMSE of the best attemp...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/house_price/scripts/grade.py
Add docstrings for better understanding
from pathlib import Path import pandas as pd from datasets import load_dataset def get_score(submission_dir: Path) -> float: if not submission_dir.is_dir(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") submission_file = submission_dir / "submission.csv" if not su...
--- +++ @@ -33,14 +33,30 @@ def get_human_baseline_score() -> float: + """ + Returns the accuracy of the best attempt by a human + on the IMDb Papers with Code leaderboard: + https://paperswithcode.com/sota/sentiment-analysis-on-imdb. + + Last updated: 2024-01-18. + """ return 96.21 de...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/imdb/scripts/grade.py
Auto-generate documentation strings for this file
import logging import shutil from functools import cache from pathlib import Path from tempfile import TemporaryDirectory import numpy as np import pandas as pd import torch from ogb.nodeproppred import Evaluator, PygNodePropPredDataset from evals.elsuite.hr_ml_agent_bench.utils import get_root_dir logger = logging....
--- +++ @@ -48,12 +48,24 @@ def get_human_baseline_score() -> float: + """ + Returns the test accuracy of the best attempt by a human on the + ogbn-arxiv leaderboard: + https://ogb.stanford.edu/docs/leader_nodeprop/#ogbn-arxiv. + + Last updated: 2024-01-18. + """ return 78.03 @cache d...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/ogbn_arxiv/scripts/grade.py
Create docstrings for all classes and functions
from functools import cache from pathlib import Path from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") submission_file = subm...
--- +++ @@ -5,6 +5,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/inverted_pendulum/scripts/grade.py
Write clean docstrings for readability
from pathlib import Path import pandas as pd from evals.elsuite.hr_ml_agent_bench.benchmarks.parkinsons_disease.env.train import smapep1 def get_score(submission_dir: Path) -> float: submission_fname = submission_dir / "submission.csv" answers_fname = Path(__file__).parent / "answer.csv" if not submis...
--- +++ @@ -6,6 +6,11 @@ def get_score(submission_dir: Path) -> float: + """ + Returns 100 - % error, where '% error' is the symmetric + mean absolute percentage error (SMAPE) of the + predictions stored in `submission.csv`. Higer is better. + """ submission_fname = submission_dir / "submissio...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/parkinsons_disease/scripts/grade.py
Write reusable docstrings
from functools import cache from pathlib import Path from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") submission_file = subm...
--- +++ @@ -5,6 +5,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/pong/scripts/grade.py
Add structured docstrings to improve clarity
import json import random from typing import Any, Dict, List, Mapping, Union, cast import numpy as np import evals from evals.api import CompletionFn from evals.record import RecorderBase def json_match(sampled_json: Any, correct_json: Any) -> bool: if sampled_json is None or correct_json is None: # Mis...
--- +++ @@ -10,6 +10,8 @@ def json_match(sampled_json: Any, correct_json: Any) -> bool: + """Return True if the sampled completion in JSON format + matches a correct answer, component by component""" if sampled_json is None or correct_json is None: # Missing values are never correct ret...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/basic/json_match.py
Improve documentation using docstrings
import time from typing import Union import numpy as np def relu(x: np.ndarray) -> np.ndarray: return x * (x > 0) def add_padding(X: np.ndarray, pad_size: Union[int, list, tuple], pad_val: int = 0) -> np.ndarray: assert len(X.shape) == 4, "Input image should be form of [Batch, Width, Height, Channels]" ...
--- +++ @@ -5,10 +5,25 @@ def relu(x: np.ndarray) -> np.ndarray: + """ + Relu activation function. Returns max(0,value) + args: + x: input array of any shape + output: All negatives clipped to 0 + """ return x * (x > 0) def add_padding(X: np.ndarray, pad_size: Union[int, list, tuple]...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/vectorization/env/train.py
Add docstrings to incomplete code
from functools import cache from pathlib import Path import numpy as np from evals.elsuite.hr_ml_agent_bench.utils import get_baseline_score def get_score(submission_dir: Path) -> float: if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: {submission_dir}") su...
--- +++ @@ -7,6 +7,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score (the average reward over 100 consecutive + episodes) of the submission in the given directory. + """ if not submission_dir.exists(): raise ValueError(f"Expected to be given a directory. Got: ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/pusher/scripts/grade.py
Write docstrings for utility functions
from pathlib import Path import pandas as pd def get_score(submission_dir: Path) -> float: submission_path = submission_dir / "submission.csv" if not submission_path.exists(): # no submission return get_naive_baseline_score() answer_path = Path(__file__).parent / "answer.csv" assert answ...
--- +++ @@ -4,6 +4,10 @@ def get_score(submission_dir: Path) -> float: + """ + Returns the score of the submission in the given directory. + The score is defined as the classification accuracy of the submission. + """ submission_path = submission_dir / "submission.csv" @@ -35,14 +39,26 @@ d...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/spaceship_titanic/scripts/grade.py
Add well-formatted docstrings
import time from typing import Union import numpy as np def relu(x: np.ndarray) -> np.ndarray: return x * (x > 0) def add_padding(X: np.ndarray, pad_size: Union[int, list, tuple], pad_val: int = 0) -> np.ndarray: assert len(X.shape) == 4, "Input image should be form of [Batch, Width, Height, Channels]" ...
--- +++ @@ -1,3 +1,6 @@+""" +Attempt version: v20240124 +""" import time from typing import Union @@ -5,10 +8,25 @@ def relu(x: np.ndarray) -> np.ndarray: + """ + Relu activation function. Returns max(0,value) + args: + x: input array of any shape + output: All negatives clipped to 0 + """...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/vectorization/scripts/human_baseline.py
Write documentation strings for class attributes
import copy import fnmatch import json import os import shutil import signal import time from logging import getLogger from multiprocessing import active_children from pathlib import Path from traceback import format_exception from typing import Optional from dacite import from_dict from evals.elsuite.hr_ml_agent_be...
--- +++ @@ -1,3 +1,10 @@+""" +This file defines the `Environment` class, which manages the agent's workspace, including files, +datasets, and other resources. + +Note: This file is adapted from MLAgentBench with minimal edits made. The original file can be +found at: https://github.com/snap-stanford/MLAgentBench/blob/m...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/environment.py
Generate docstrings for script automation
import glob import inspect import os import selectors import shutil import subprocess import sys import time from functools import wraps from io import StringIO from logging import getLogger from evals.elsuite.hr_ml_agent_bench.schema import Action, ActionInfo, EnvException, Step from evals.elsuite.hr_ml_agent_bench...
--- +++ @@ -1,3 +1,10 @@+""" +This file defines low-level actions for the MLAgentBench environment. Low-level actions are +primitive actions that can be directly executed by the environment. + +Note: This file is adapted from MLAgentBench with minimal edits made. The original file can be +found at: https://github.com/s...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/low_level_actions.py
Help me write clear docstrings
import re from functools import cache from pathlib import Path from shutil import copyfile from tempfile import TemporaryDirectory import evals.elsuite.hr_ml_agent_bench.benchmarks.vectorization.env.train as baseline_script from evals.elsuite.hr_ml_agent_bench.low_level_actions import execute_script def get_score(su...
--- +++ @@ -69,6 +69,11 @@ def get_human_baseline_score() -> float: + """ + Executes human baseline script `human_baseline.py` and returns the + execution time. Expects the score to be printed to stdout and to follow the + following pattern: 'Time taken for execution: {interval_time} seconds'. + """ ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/benchmarks/vectorization/scripts/grade.py
Add docstrings for better understanding
import json import os import subprocess import sys from logging import getLogger from evals.elsuite.hr_ml_agent_bench.utils import get_data_dir benchmarks_dir = os.path.dirname(os.path.realpath(__file__)) + "/benchmarks" logger = getLogger(__name__) def get_research_problem(task: str) -> str: data_dir = get_d...
--- +++ @@ -1,3 +1,4 @@+"""Prepare a benchmark folder for a task.""" import json import os @@ -12,6 +13,7 @@ def get_research_problem(task: str) -> str: + """Get the research problem for the given task.""" data_dir = get_data_dir() @@ -35,6 +37,7 @@ def prepare_task(benchmark_dir, python_command...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/prepare_task.py
Document this script properly
import json import textwrap from evals.elsuite.hr_ml_agent_bench.actions import ACTION_SPACE from evals.elsuite.hr_ml_agent_bench.schema import ActionInfo ACTION_FORMAT = """ - {name}: {description} Usage: ``` Action: {name} Action Input: {usage} ``` Returns: ...
--- +++ @@ -38,6 +38,7 @@ def format_action(action: ActionInfo) -> str: + """Formats a single action into a descriptive string.""" usage = json.dumps(action.usage, indent=4, ensure_ascii=False) indented_usage = textwrap.indent(text=usage, prefix=" " * 8) @@ -52,15 +53,17 @@ def get_actions_descri...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/prompts.py
Create documentation strings for testing functions
import logging import os import subprocess from pathlib import Path from shutil import copyfile from subprocess import CalledProcessError from tempfile import TemporaryDirectory from typing import Callable, Optional import torch from openai import OpenAI from evals.solvers.solver import Solver from evals.task_state i...
--- +++ @@ -18,6 +18,7 @@ def complete_text(prompt: str, solver: Solver, **kwargs) -> str: + """Complete text using the given solver.""" assert isinstance(solver, Solver) @@ -28,21 +29,25 @@ def get_root_dir() -> Path: + """Returns the root directory of the repository.""" return get_parent...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/utils.py
Fully document this Python code with docstrings
from typing import Any, List, Optional, Set, Tuple, Union import networkx as nx import numpy as np def val_and_count_roots( nodes: List[str], np_rng: np.random.Generator, total_edges: Optional[int] = None, min_roots: Optional[int] = None, ) -> int: n_nodes = len(nodes) if min_roots is not Non...
--- +++ @@ -1,3 +1,4 @@+"""Utils for network graph related operations.""" from typing import Any, List, Optional, Set, Tuple, Union import networkx as nx @@ -10,6 +11,16 @@ total_edges: Optional[int] = None, min_roots: Optional[int] = None, ) -> int: + """ + Validates the parameters for the construc...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/graph_utils.py
Create docstrings for API functions
from collections import defaultdict import numpy as np from evals.elsuite.bluff.bluff.cards import ( CARDS, PlayerCards, get_all_winning_hands, get_bluff_move, ) from evals.elsuite.bluff.bluff.game import Game class Player: def __init__(self, game: Game): self.game = game self.ga...
--- +++ @@ -36,6 +36,7 @@ class HonestBotLowest(Player): def make_move(self): + """Bid the lowest poker hand you have in cards, or call bluff if it is too low""" winning_hands = get_all_winning_hands(self.cards) current_round = self.game.rounds[-1] for hand in winning_hands: @@ ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/bluff/bluff/players.py
Help me document legacy Python code
from typing import Dict, List, Set import numpy as np from evals.elsuite.identifying_variables.utils import parse_solver_preds from evals.solvers.solver import SolverResult def compute_DCG(ranking: List[float], ceil_negs: bool = False) -> float: dcg = 0 for i, rel in enumerate(ranking, start=1): if ...
--- +++ @@ -7,6 +7,9 @@ def compute_DCG(ranking: List[float], ceil_negs: bool = False) -> float: + """ + Computes the DCG of a ranking + """ dcg = 0 for i, rel in enumerate(ranking, start=1): if ceil_negs: @@ -16,6 +19,10 @@ def compute_nDCG(ranking: List[float], best: List[float], w...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/metrics.py
Generate documentation strings for clarity
import logging import random from dataclasses import asdict from typing import Dict, List, Optional, Tuple import networkx as nx import numpy as np from evals.elsuite.identifying_variables import constants, graph_utils, prompts from evals.elsuite.identifying_variables.metrics import ( compute_fallout, compute...
--- +++ @@ -1,3 +1,6 @@+""" +Implementation logic for Identifying Variables eval +""" import logging import random from dataclasses import asdict @@ -93,6 +96,9 @@ return self._compute_agg_metrics(metrics) def _compute_agg_metrics(self, metrics: List[Dict]) -> Dict[str, float]: + """ + C...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/eval.py
Add missing documentation to my Python functions
from pathlib import Path from typing import Dict, Tuple import numpy as np import pandas as pd from tqdm.auto import tqdm from evals.elsuite.identifying_variables.metrics import compute_metric_posthoc from evals.elsuite.identifying_variables.scripts.plotting_utils import ( plot_difficulty_bars, plot_solver_ba...
--- +++ @@ -92,6 +92,9 @@ def handle_posthoc_metrics(final_results: Dict, log_path: Path, solver: str): + """ + Computes and includes missing metrics from log file if they are not present + """ metric_entries = log_utils.extract_individual_results(log_path) sampling_entries = log_utils.extract_in...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/scripts/make_plots.py
Help me comply with documentation standards
from typing import List, Set, Tuple from evals.elsuite.identifying_variables.structs import Sample from evals.elsuite.identifying_variables.renderers.base import RendererBase import evals.elsuite.identifying_variables.graph_utils as graph_utils import evals.elsuite.identifying_variables.renderers.templates as template...
--- +++ @@ -8,8 +8,27 @@ class CorrSetRenderer(RendererBase): + """ + Describes the correlation structure of variables + """ def determine_sample_type(self, sample: Sample) -> Tuple[str, List[Set[str]]]: + """ + Determines the type of sample we have, returning the correlation sets in +...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/renderers/corrset.py
Generate consistent documentation across files
from dataclasses import asdict import os import argparse from typing import Dict, List, Optional, Set, Tuple, Any import json import copy from tqdm.auto import tqdm import networkx as nx import numpy as np import evals.elsuite.identifying_variables.latent_funcs as latent_funcs from evals.elsuite.identifying_variable...
--- +++ @@ -1,3 +1,8 @@+""" +Code for generating .jsonl dataset for identifying variables eval + +Use default argparse args to replicate the dataset used for the report +""" from dataclasses import asdict import os @@ -35,6 +40,13 @@ def random_latent_func_meta( np_rng: np.random.Generator, input_x: Optional[s...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/scripts/gen_data.py
Fill in missing docstrings in my code
import re from typing import Dict import networkx as nx import numpy as np from evals.elsuite.identifying_variables.structs import Answer, Sample from evals.solvers.solver import SolverResult def parse_solver_preds(solver_result: SolverResult) -> Answer: solver_string = solver_result.output.strip().lower() ...
--- +++ @@ -54,6 +54,10 @@ def sample_serializer(obj): + """ + Custom serializer to pass to json.dumps when + saving a sample dictionary to jsonl + """ if isinstance(obj, set): return list(obj) elif isinstance(obj, nx.DiGraph): @@ -65,6 +69,7 @@ def json_to_sample(serialized_sampl...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/utils.py
Generate documentation strings for clarity
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import networkx as nx


@dataclass
class Answer:
    valid_hypothesis: bool
    ind_var: Optional[str]
    dep_var: Optional[str]
    ctrl_vars: Optional[List[str]]


@dataclass
class Sample:
    variable_metadata: Dict
    hypotheses: n...
---
+++
@@ -1,3 +1,4 @@
+"""Custom data structures for the eval"""
 from dataclasses import dataclass
 from typing import Dict, List, Optional, Tuple
@@ -14,6 +15,30 @@
 @dataclass
 class Sample:
+    """
+    A sample of the dataset for the eval.
+
+    Args:
+        variable_metadata (Dict) : A dictionary mapping ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/identifying_variables/structs.py
Generate docstrings for exported functions
import os
import re
from typing import Literal
from openai import OpenAI

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


def is_system_msg(m: dict) -> bool:
    assert isinstance(m, dict), "Message must be a dict."
    assert "role" in m, "Message must have a role."
    assert isinstance(m["role"], str), ...
---
+++
@@ -16,6 +16,7 @@
 def format_msg(msg: dict, role: Literal["mark", "conartist"]) -> dict:
+    """Formats a single message from the perspective of the `role`."""
     # pre-conditions
     if "system" in msg["role"]:
         assert role in msg["role"], "Can't format system message for other user!"
@@ -47,6 ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/make_me_pay/utils.py
Help me document legacy Python code
import logging
import string
from typing import Any, Callable, Iterable, Optional, Union
from evals import CompletionFn
from evals.elsuite.modelgraded.base import ModelGradedSpec
from evals.elsuite.utils import PromptFn, format_necessary, format_prompt
from evals.prompt.base import OpenAICreateChatPrompt, is_chat_prom...
---
+++
@@ -103,12 +103,14 @@
 def choice_to_str(choice_strings: Iterable[str]) -> str:
+    """Return a string of choices, e.g. '"Yes" or "No" or "Maybe"'."""
     return " or ".join(f'"{choice}"' for choice in choice_strings)
 def get_choice(
     text: str,
     eval_type: str,
     match_fn: Union[str, Callable],
     choi...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/modelgraded/classify_utils.py
Generate documentation strings for clarity
import json
import logging
import os
from multiprocessing.pool import ThreadPool
from typing import Sequence
import chess
from tqdm import tqdm
from evals.elsuite.cant_do_that_anymore.chess.board import BoardController
from evals.elsuite.cant_do_that_anymore.chess.board_test import default_board_init
from evals.elsui...
---
+++
@@ -25,6 +25,10 @@
 def construct_messages(previous_moves: Sequence[str]) -> Sequence[Message]:
+    """
+    Creates list of Message's containing the previous chess moves. The last
+    Message is always from the "user"
+    """
     solver_is_white = len(previous_moves) % 2 == 0
     messages = []
     cur...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/cant_do_that_anymore/utils.py
Document classes and their methods
import copy
from typing import Sequence
from evals.elsuite.cant_do_that_anymore.chess.utils import (
    Move,
    coord_within_board,
    get_other_player_id,
    get_path_between_coords,
    has_piece_been_moved,
    move_crosses_pieces,
    parse_piece,
)


class Piece:
    def __init__(
        self,
        piece...
---
+++
@@ -49,6 +49,10 @@
         start_coord: Sequence[int],
         previous_moves: Sequence[Move],
     ) -> Sequence[Move]:
+        """
+        Returns a sequence representing all moves this piece can make given the current environment
+        and rules this piece follows
+        """
         if player_id == ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/cant_do_that_anymore/chess/pieces.py
Write docstrings for utility functions
import argparse
import copy
import os
import pathlib
from typing import Sequence
import chess.pgn
import requests
import zstandard
from tqdm import tqdm
from evals.elsuite.cant_do_that_anymore.chess.board import BoardController
from evals.elsuite.cant_do_that_anymore.chess.utils import Move, parse_piece
from evals.el...
---
+++
@@ -19,6 +19,10 @@
 def prepare_lichess_2014_dataset(out_dir: str) -> str:
+    """
+    Downloads and extracts Lichess 2014 April dataset, returns the
+    path to the extracted .pgn file
+    """
     fname = "lichess_db_standard_rated_2014-04.pgn.zst"
     raw_data_out_path = os.path.join(out_dir, fname)
     ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/cant_do_that_anymore/scripts/dataset_creation.py
Write docstrings for this repository
import logging
import time
from pathlib import Path
from typing import Optional
import docker
import requests
from evals.elsuite.multistep_web_tasks.constants import (
    AVAILABLE_SERVICES,
    CACHE_DIR,
    DEFAULT_PORT_MAPPINGS,
    DOCKER_NAMES,
    DOWNLOAD_URLS,
    LOCAL_NETWORK,
    ServiceIdentifier,
)
fro...
---
+++
@@ -91,6 +91,9 @@
         return containers
     def setup_network(self) -> docker.models.networks.Network: # type: ignore
+        """Set up a network with the network name from constants.py.
+        Currently I just set up the network here and rely on the bash container to get it
+        TODO: Work out i...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/session.py
Generate helpful docstrings for debugging
import copy
import logging
import re
from functools import cached_property
from typing import Any
import tiktoken
from evals.completion_fns.openai import OpenAIChatCompletionFn
from evals.elsuite.multistep_web_tasks.solvers.strong_solver.strong_prompts import (
    EXAMPLE_TEMPLATE,
    PROMPT,
)
from evals.elsuite.m...
---
+++
@@ -28,6 +28,7 @@
 class StrongSolver(Solver):
+    """Chat-model-based solver that uses Chain of Thought by default."""
     def __init__(
         self,
@@ -111,6 +112,9 @@
         return SolverResult(parsed_action)
     def _add_action_splitter_to_actions(self, messages: list[Message...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/solvers/strong_solver/strong_solver.py
Document functions with clear intent
import copy
from typing import Callable, Dict, Sequence
from evals.elsuite.cant_do_that_anymore.chess.notation import NotationParser
from evals.elsuite.cant_do_that_anymore.chess.pieces import Piece
from evals.elsuite.cant_do_that_anymore.chess.utils import (
    Move,
    get_other_player_id,
    get_path_between_coo...
---
+++
@@ -12,6 +12,11 @@
 class Board:
+    """
+    Represents one board position. Is instantiated several times
+    by the BoardController to simulate future boards after playing
+    some moves.
+    """
     def __init__(
         self,
@@ -58,6 +63,13 @@
         return str_board
     def _update_board(se...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/cant_do_that_anymore/chess/board.py
Help me comply with documentation standards
import logging
import re
from typing import Any
from evals.completion_fns.openai import OpenAIChatCompletionFn
from evals.elsuite.multistep_web_tasks.solvers.webarena_solvers.webarena_prompts import (
    COT_BASH_BROWSER_PROMPT,
    COT_BROWSER_PROMPT,
)
from evals.elsuite.multistep_web_tasks.utils import MWTTaskSta...
---
+++
@@ -1,3 +1,9 @@
+"""
+These Solvers are modified versions of the agents implemented in the original
+WebArena project. The modifications include adding a Bash command, and editing
+the instructions a little (such as replacing one Browser few-shot example with a
+Bash few-shot example).
+"""
 import logging
 im...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/solvers/webarena_solvers/webarena_solvers.py
Generate docstrings for script automation
import logging
from typing import Optional, Union
from beartype import beartype
from playwright.sync_api import ViewportSize
from evals.elsuite.multistep_web_tasks.session import Session
from evals.elsuite.multistep_web_tasks.webarena.bash_browser_env.bash_browser_utils import (
    BashBrowserEnvOutput,
)
from evals...
---
+++
@@ -22,6 +22,11 @@
 class BashBrowserEnv(LLMAgentEnv):
+    """Currently, this is implemented as a wrapper around a BashEnv and a
+    BrowserEnv. I'm not sure if this is ideal -- I'm worried that e.g. running
+    a bash command that should change something for the BrowserEnv won't
+    actually register t...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/bash_browser_env/bash_browser_env.py
Add docstrings for production code
import json
import re
from typing import Optional
from evals.elsuite.hr_ml_agent_bench.high_level_actions import HIGH_LEVEL_ACTIONS
from evals.elsuite.hr_ml_agent_bench.low_level_actions import LOW_LEVEL_ACTIONS
from evals.elsuite.hr_ml_agent_bench.schema import Action

ACTION_SPACE = LOW_LEVEL_ACTIONS + HIGH_LEVEL_AC...
---
+++
@@ -15,6 +15,7 @@
 def get_action(s: str) -> Optional[Action]:
+    """Return an `Action` object from a string representation of an action, if it exists."""
     action_pattern = r"Action:\s*(.+)"
     args_pattern = r"Action Input:\s*(\{.*?\}|\S.*)"
@@ -40,6 +41,7 @@
 def is_valid_action(action: Action...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/hr_ml_agent_bench/actions.py
Write docstrings for algorithm functions
import logging
import time
from pathlib import Path
from typing import Optional, Union
from beartype import beartype
from evals.elsuite.multistep_web_tasks.session import Session
from evals.elsuite.multistep_web_tasks.webarena.browser_env.actions import (
    BrowserAction,
    BrowserActionTypes,
    create_id_based...
---
+++
@@ -34,6 +34,16 @@
 class BrowserEnv(LLMAgentEnv):
+    """This class is a rewrite of ScriptBrowserEnv from WebArena that fits
+    with my refactor of the codebase.
+
+    From the original:
+    "The goal of this environment is to produce a prototype of a browser environment.
+    In the end, we want to su...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/browser_env/basic_browser_env.py
Add docstrings explaining edge cases
import ast
import logging
import random
import re
import string
from dataclasses import dataclass
from enum import IntEnum
from itertools import chain
from typing import Any, Optional, TypedDict, Union, cast
import numpy as np
import numpy.typing as npt
from beartype import beartype
from beartype.door import is_beara...
---
+++
@@ -1,3 +1,7 @@
+"""
+Browser Env action space.
+Inspited by Farama-Foundation/miniwob-plusplus
+"""
 import ast
 import logging
@@ -57,6 +61,7 @@
 @beartype
 def is_in_viewport(element: Locator, viewport: ViewportSize, threshold: float = 0.3) -> bool:
+    """Given a playwright locator, check if it is in the...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/browser_env/actions.py
Add clean documentation to messy code
import glob
from itertools import combinations
from pathlib import Path
from beartype import beartype
from playwright.sync_api import sync_playwright
from evals.elsuite.multistep_web_tasks.webarena.browser_env.env_config import (
    ACCOUNTS,
    GITLAB,
    REDDIT,
    SHOPPING,
    SHOPPING_ADMIN,
)

HEADLESS = T...
---
+++
@@ -1,3 +1,4 @@
+"""Script to automatically login each website"""
 import glob
 from itertools import combinations
@@ -20,6 +21,7 @@
 @beartype
 def is_expired(storage_state: Path, url: str, keyword: str, url_exact: bool = True) -> bool:
+    """Test whether the cookie is expired"""
     if not storage_state....
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/browser_env/auto_login.py
Write beginner-friendly docstrings
import base64
import io
import json
import re
from pathlib import Path
from typing import Any, Optional
from beartype import beartype
from PIL import Image
from evals.elsuite.multistep_web_tasks.webarena.browser_env import (
    BrowserAction,
    BrowserActionTypes,
    ObservationMetadata,
    action2str,
)
from ev...
---
+++
@@ -40,6 +40,7 @@
     observation_metadata: dict[str, ObservationMetadata],
     action_set_tag: str,
 ) -> str:
+    """Parse the predicted actions for rendering purpose. More comprehensive information"""
     if action_set_tag == "id_accessibility_tree":
         text_meta_data = observation_metadata["text"]
         ...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/browser_env/helper_functions.py
Document functions with detailed explanations
import logging
import re
from collections import defaultdict
from typing import Any, Optional, TypedDict
import numpy as np
import numpy.typing as npt
from beartype import beartype
from playwright.sync_api import CDPSession, Page, ViewportSize
from evals.elsuite.multistep_web_tasks.webarena.browser_env.browser_utils ...
---
+++
@@ -125,6 +125,12 @@
     @beartype
     def retrieve_viewport_info(self, info: BrowserState) -> None:
+        """Add viewport related information to the DOMTree
+        1. add union bound, which is a union of all the bounds of the nodes in the subtree
+        This is only used when current_viewport_only is...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/browser_env/processors.py
Write docstrings for this repository
from abc import ABC, abstractmethod, abstractproperty
from dataclasses import dataclass
from typing import Any, NamedTuple, Optional
from gymnasium import Env
from evals.elsuite.multistep_web_tasks.webarena.core.utils import ExperimentConfig

# These classes are extremely small,
# and are just there for type-checkin...
---
+++
@@ -1,3 +1,6 @@
+"""This file contains abstract classes representing Actions, Observations, and Environments.
+This abstraction should be able to handle ScriptBrowserEnv and BashEnv, as well as a combination
+of the two."""
 from abc import ABC, abstractmethod, abstractproperty
 from dataclasses import datacla...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/core/env.py
Create docstrings for API functions
import argparse
import logging
from pathlib import Path
from evals.elsuite.multistep_web_tasks.session import Session
from evals.elsuite.multistep_web_tasks.utils import MWTTaskState
from evals.elsuite.multistep_web_tasks.webarena.bash_browser_env.bash_browser_env import (
    BashBrowserEnv,
)
from evals.elsuite.mult...
---
+++
@@ -1,3 +1,4 @@
+"""Script to run end-to-end evaluation on the benchmark"""
 import argparse
 import logging
 from pathlib import Path
@@ -199,6 +200,11 @@
 def _messages_from_trajectory(trajectory: Trajectory) -> list[Message]:
+    """Build a list of messages from the trajectory.
+    We don't have to inclu...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/eval_run.py
Document this module using docstrings
# answer string match
import logging
import time
from abc import ABC
from typing import Union
import evaluate # type: ignore[import]
from beartype import beartype
from beartype.door import is_bearable
from evals.elsuite.multistep_web_tasks.webarena.bash_browser_env.bash_browser_env import (
    BashBrowserEnv,
)
fr...
---
+++
@@ -1,3 +1,4 @@
+"""base class for evaluation"""
 # answer string match
 import logging
@@ -75,6 +76,8 @@
 def get_answer_from_action(action: Action) -> str:
+    """This is kind of a hack to make BrowserAction and BashAction
+    play nicely together"""
     if isinstance(action, BrowserAction):
         r...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/evaluation_harness/evaluators.py
Provide clean and structured docstrings
import json
from typing import Any
from urllib.parse import urlparse
import requests
from beartype import beartype
from playwright.sync_api import Page
from evals.completion_fns.openai import OpenAIChatCompletionFn
from evals.elsuite.multistep_web_tasks.webarena.browser_env.env_config import ACCOUNTS, SHOPPING


@bea...
---
+++
@@ -1,3 +1,4 @@
+"""Implements helper functions to assist evaluation cases where other evaluators are not suitable."""
 import json
 from typing import Any
 from urllib.parse import urlparse
@@ -28,6 +29,7 @@
 @beartype
 def shopping_get_latest_order_url() -> str:
+    """Get the latest order url from the shopp...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/evaluation_harness/helper_functions.py
Document this module using docstrings
import json
import logging
import time
from abc import ABC
from typing import Optional, TypedDict
import docker
from evals.elsuite.multistep_web_tasks.constants import (
    AVAILABLE_SERVICES,
    DEFAULT_PORT_MAPPINGS,
    DOCKER_NAMES,
    FLASK_API_KEY,
    SERVICE_TO_URL,
    URL_TO_SERVICE,
)
from evals.elsuit...
---
+++
@@ -1,3 +1,7 @@
+"""This file contains classes that represent Playwright objects.
+Calling methods on them should call the associated Playwright method
+through the Flask app.
+"""
 import json
 import logging
@@ -29,12 +33,15 @@
 class Forwarder(ABC):
+    """Class to represent all objects that call
+    t...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/multistep_web_tasks/webarena/core/playwright_api.py
Provide docstrings following PEP 257
import random
from typing import Any, Dict, Sequence
import evals.metrics
from evals.api import DummyCompletionFn
from evals.elsuite.sandbagging.mmlu_eval import MMLU
from evals.elsuite.sandbagging.utils import (
    compute_sandbagging_score,
    get_difficulty,
    get_violation_events,
)
from evals.eval import Solv...
---
+++
@@ -14,6 +14,10 @@
 class Sandbagging(SolverEval):
+    """
+    Runs MMLU eval multiple times with different sandbagging targets. args and
+    kwargs pass initialization vars to the MMLU class
+    """
     def __init__(
         self,
@@ -40,6 +44,9 @@
         self.evals.append(mmlu_eval)
     def...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/sandbagging/sandbagging_eval.py
Add docstrings that explain purpose and usage
import os
from pathlib import Path
from typing import Sequence
import matplotlib.pyplot as plt
import pandas as pd
from evals.elsuite.sandbagging.utils import compute_sandbagging_score
from evals.utils import log_utils


def extract_results(datadir: Path) -> pd.DataFrame:
    df_rows = []
    for path, results in log...
---
+++
@@ -27,6 +27,9 @@
 def compute_all_model_scores(model_log: dict, thresh_targets: bool) -> float:
+    """
+    Given log, calculate sandbagging score for each model
+    """
     # Find min baseline
     if thresh_targets:
         baselines = [model_log[name]["baseline"] for name in model_log.keys()]
@@ -55...
https://raw.githubusercontent.com/openai/evals/HEAD/evals/elsuite/sandbagging/scripts/utils.py