| instruction (string, 100 classes) | code (string, 78-193k chars) | response (string, 259-170k chars) | file (string, 59-203 chars) |
|---|---|---|---|
Generate helpful docstrings for debugging | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -56,6 +56,14 @@
def snake_to_lower_camel(snake_case_string: str):
+ """Converts a snake_case string to a lower_camel_case string.
+
+ Args:
+ snake_case_string: The input snake_case string.
+
+ Returns:
+ The lower_camel_case string.
+ """
if "_" not in snake_case_string:
return sna... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/tools/openapi_tool/openapi_spec_parser/rest_api_tool.py |
Write Python docstrings for this snippet | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utilities for Output Schema.
+
+This module is for ADK internal use only.
+Please do not rely on the implementation details.
+"""
from __future__ import annotations
@@ -24,6 +29,... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/output_schema_utils.py |
Help me write clear docstrings | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -35,12 +35,14 @@
class Capabilities(Enum):
+ """Capabilities indicating what type of operation tools are allowed to be performed on Spanner."""
DATA_READ = "data_read"
"""Read only data operations tools are allowed."""
class QueryResultMode(Enum):
+ """Settings for Spanner execute sql quer... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/tools/spanner/settings.py |
Document all public functions with docstrings | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utilities for ADK context management.
+
+This module is for ADK internal use only.
+Please do not rely on the implementation details.
+"""
from __future__ import annotations
@@ -... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/context_utils.py |
Help me add docstrings to my project | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Cache performance analysis utilities for ADK context caching system.
+
+This module provides tools to analyze cache performance metrics from event
+history, including hit ratios, cost... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/cache_performance_analyzer.py |
Add docstrings to improve collaboration | from __future__ import annotations
import logging
import os
from collections.abc import Callable
from pathlib import Path
from typing import TYPE_CHECKING, Any
import torch
import transformers
from packaging import version
from torch import Tensor, nn
from torch.optim import Optimizer
from torch.utils.data import Dat... | --- +++ @@ -37,6 +37,14 @@
class SaveModelCallback(TrainerCallback):
+ """A Callback to save the model to the `output_dir`.
+
+ If save_best_model is True and evaluator is defined, then we save on evaluate, but only if the new model is
+ better than the currently saved one according to the evaluator.
+
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/fit_mixin.py |
Generate docstrings with parameter types | from __future__ import annotations
from typing import Literal
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class BaseWeightingScheme(nn.Module):
def __init__(self, *args, **kwargs) -> None:
super(... | --- +++ @@ -10,27 +10,53 @@
class BaseWeightingScheme(nn.Module):
+ """Base class for implementing weighting schemes in LambdaLoss."""
def __init__(self, *args, **kwargs) -> None:
+ """"""
super().__init__(*args, **kwargs)
def forward(self, gain: Tensor, discount: Tensor, true_sorte... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/LambdaLoss.py |
Add documentation for all methods | # Copyright 2026 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing... | --- +++ @@ -1,5 +1,6 @@ # Copyright 2026 Google LLC
#
+"""Spanner Admin Tool."""
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -27,6 +28,25 @@ project_id: str,
credentials: Credentials,
) -> dict[str, Any]:
+ """List Spanner instances within a project.
+
+ Args:
+ project_... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/tools/spanner/admin_tool.py |
Create simple docstrings for beginners | from __future__ import annotations
from torch import nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.losses.PListMLELoss import PListMLELoss
class ListMLELoss(PListMLELoss):
def __init__(
self,
model: CrossEncoder,
activation_fn: n... | --- +++ @@ -14,6 +14,85 @@ mini_batch_size: int | None = None,
respect_input_order: bool = True,
) -> None:
+ """
+ This loss function implements the ListMLE learning to rank algorithm, which uses a list-wise
+ approach based on maximum likelihood estimation of permutations. I... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/ListMLELoss.py |
Create docstrings for API functions | from __future__ import annotations
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.util import fullname
class MarginMSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fn: nn.Module = nn.Identity(),... | --- +++ @@ -9,6 +9,79 @@
class MarginMSELoss(nn.Module):
def __init__(self, model: CrossEncoder, activation_fn: nn.Module = nn.Identity(), **kwargs) -> None:
+ """
+ Computes the MSE loss between ``|sim(Query, Pos) - sim(Query, Neg)|`` and ``|gold_sim(Query, Pos) - gold_sim(Query, Neg)|``.
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/MarginMSELoss.py |
Create documentation for each function signature | from __future__ import annotations
from collections.abc import Iterator
from contextlib import nullcontext
from functools import partial
import torch
import tqdm
from torch import Tensor, nn
from torch.utils.checkpoint import get_device_states, set_device_states
from sentence_transformers.cross_encoder.CrossEncoder ... | --- +++ @@ -14,6 +14,12 @@
class RandContext:
+ """
+ Random-state context manager class. Reference: https://github.com/luyug/GradCache.
+
+ This class will back up the pytorch's random state during initialization. Then when the context is activated,
+ the class will set up the random state with the bac... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/CachedMultipleNegativesRankingLoss.py |
Generate helpful docstrings for debugging | from __future__ import annotations
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class ListNetLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
activation_fn: nn.Module | None... | --- +++ @@ -14,6 +14,78 @@ activation_fn: nn.Module | None = nn.Identity(),
mini_batch_size: int | None = None,
) -> None:
+ """
+ ListNet loss for learning to rank. This loss function implements the ListNet ranking algorithm
+ which uses a list-wise approach to learn ranking ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/ListNetLoss.py |
Add docstrings to incomplete code | from __future__ import annotations
import torch
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class PListMLELambdaWeight(nn.Module):
def __init__(self, rank_discount_fn=None) -> None:
super().__init__()
... | --- +++ @@ -8,12 +8,29 @@
class PListMLELambdaWeight(nn.Module):
+ """Base class for implementing weighting schemes in Position-Aware ListMLE Loss."""
def __init__(self, rank_discount_fn=None) -> None:
+ """
+ Initialize a lambda weight for PListMLE loss.
+
+ Args:
+ rank_d... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/PListMLELoss.py |
Add docstrings to improve code quality | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utilities for environment variable handling.
+
+This module is for ADK internal use only.
+Please do not rely on the implementation details.
+"""
from __future__ import annotations... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/env_utils.py |
Create documentation strings for testing functions | from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any
from huggingface_hub import ModelCard
from sentence_transformers.model_card import SentenceTransformerModelCardCallback, SentenceTransformerModelCardData
from sent... | --- +++ @@ -26,6 +26,45 @@
@dataclass
class CrossEncoderModelCardData(SentenceTransformerModelCardData):
+ """A dataclass storing data used in the model card.
+
+ Args:
+ language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
+ e.g. "en" or ["en", "de", "... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/model_card.py |
Add docstrings to meet PEP guidelines | from __future__ import annotations
from typing import Literal
from torch import nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.losses import LambdaLoss, NoWeightingScheme
class RankNetLoss(LambdaLoss):
def __init__(
self,
model: CrossEnc... | --- +++ @@ -19,6 +19,75 @@ activation_fn: nn.Module | None = nn.Identity(),
mini_batch_size: int | None = None,
) -> None:
+ """
+ RankNet loss implementation for learning to rank. This loss function implements the RankNet algorithm,
+ which learns a ranking function by optimi... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/losses/RankNetLoss.py |
Improve my code by adding docstrings | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -36,6 +36,52 @@ settings: SpannerToolSettings,
tool_context: ToolContext,
) -> dict:
+ """Run a Spanner Read-Only query in the spanner database and return the result.
+
+ Args:
+ project_id (str): The GCP project id in which the spanner database
+ resides.
+ instance_id (str): Th... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/tools/spanner/query_tool.py |
Document this module using docstrings |
from __future__ import annotations
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
def __init__(self, sentenc... | --- +++ @@ -1,3 +1,13 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/datasets/DenoisingAutoEncoderDataset.py |
Write docstrings that follow conventions |
from __future__ import annotations
import math
import random
class NoDuplicatesDataLoader:
def __init__(self, train_examples, batch_size):
self.batch_size = batch_size
self.data_pointer = 0
self.collate_fn = None
self.train_examples = train_examples
random.shuffle(self.tr... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/datasets/NoDuplicatesDataLoader.py |
Add return value explanations in docstrings | from __future__ import annotations
import logging
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any
import torch
logger = logging.getLogger(__name__)
@dataclass
class SentenceTransformerDataCollator:
tokenize_fn: Callable
valid_label_columns: list[str] = ... | --- +++ @@ -12,6 +12,16 @@
@dataclass
class SentenceTransformerDataCollator:
+ """Collator for a SentenceTransformers model.
+ This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
+ This works with the two text dataset that is used as the example in the training overview:... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/data_collator.py |
Write clean docstrings for readability | from __future__ import annotations
import logging
import os
from collections.abc import Callable
from functools import partial
from typing import Any
import torch
from packaging.version import parse as parse_version
from torch import nn
from transformers import EvalPrediction, PreTrainedTokenizerBase, TrainerCallback... | --- +++ @@ -30,6 +30,74 @@
class CrossEncoderTrainer(SentenceTransformerTrainer):
+ """
+ CrossEncoderTrainer is a simple but feature-complete training and eval loop for PyTorch
+ based on the 🤗 Transformers :class:`~transformers.Trainer`.
+
+ This trainer integrates support for various :class:`transfo... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/trainer.py |
Improve my code by adding docstrings |
from __future__ import annotations
import gzip
import logging
import random
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class ParallelSentencesDataset(Dataset):
def __in... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/datasets/ParallelSentencesDataset.py |
Write docstrings for backend logic | from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING, Literal
import numpy as np
from sklearn.metrics import average_precision_score, matthews_corrcoef
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.reader... | --- +++ @@ -25,6 +25,62 @@
class BinaryClassificationEvaluator(SentenceEvaluator):
+ """
+ Evaluate a model based on the similarity of the embeddings by calculating the accuracy of identifying similar and
+ dissimilar sentences.
+ The metrics are the cosine similarity, dot score, Euclidean and Manhattan... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/evaluation/BinaryClassificationEvaluator.py |
Add docstrings to meet PEP guidelines | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -26,6 +26,11 @@
class StreamingResponseAggregator:
+ """Aggregates partial streaming responses.
+
+ It aggregates content from partial responses, and generates LlmResponses for
+ individual (partial) model responses, as well as for aggregated content.
+ """
def __init__(self) -> None:
self.... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/streaming_utils.py |
Write reusable docstrings |
from __future__ import annotations
from torch.utils.data import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.readers.InputExample import InputExample
class SentencesDataset(Dataset):
def __init__(self, examples: list[InputExample], model: SentenceTransformer):
... | --- +++ @@ -1,3 +1,10 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/datasets/SentencesDataset.py |
Add clean documentation to messy code |
from __future__ import annotations
import logging
import numpy as np
from torch.utils.data import IterableDataset
from sentence_transformers.readers import InputExample
logger = logging.getLogger(__name__)
class SentenceLabelDataset(IterableDataset):
def __init__(self, examples: list[InputExample], samples_... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/datasets/SentenceLabelDataset.py |
Add docstrings that explain purpose and usage | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utilities for Google LLM variants.
+
+This module is for ADK internal use only.
+Please do not rely on the implementation details.
+"""
from __future__ import annotations
@@ -24,... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/variant_utils.py |
Help me add docstrings to my project | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -12,6 +12,11 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utilities for Vertex AI. Includes helper functions for Express Mode.
+
+This module is for ADK internal use only.
+Please do not rely on the implementation details.
+"""
from __fut... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/vertex_ai_utils.py |
Expand my code with proper documentation strings | from __future__ import annotations
import csv
import logging
import os
from collections.abc import Callable
from typing import TYPE_CHECKING
import numpy as np
import torch
import tqdm
from sklearn.metrics import average_precision_score, ndcg_score
from sentence_transformers.evaluation.SentenceEvaluator import Sente... | --- +++ @@ -24,6 +24,68 @@
class RerankingEvaluator(SentenceEvaluator):
+ """
+ This class evaluates a SentenceTransformer model for the task of re-ranking.
+
+ Given a query and a list of documents, it computes the score [query, doc_i] for all possible
+ documents and sorts them in decreasing order. Th... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/evaluation/RerankingEvaluator.py |
Fill in missing docstrings in my code | from __future__ import annotations
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
... | --- +++ @@ -21,9 +21,11 @@
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s") -> None:
+ """Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
+ """Creates a new 'notice' logging level"... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/LoggingHandler.py |
Add professional docstrings to my codebase | from __future__ import annotations
import copy
import importlib
import inspect
import json
import logging
import math
import os
import queue
import shutil
import sys
import tempfile
import traceback
import warnings
from collections import OrderedDict
from collections.abc import Callable, Iterable, Iterator
from contex... | --- +++ @@ -59,6 +59,108 @@
class SentenceTransformer(nn.Sequential, FitMixin, PeftAdapterMixin):
+ """
+ Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
+
+ Args:
+ model_name_or_path (str, optional): If it is a filepath on disk, it loads the mod... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/SentenceTransformer.py |
Document my Python code with docstrings | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import pytorch_cos_sim
class BatchHardTripletLossDistanceFunction:
@staticmethod
def c... | --- +++ @@ -10,13 +10,24 @@
class BatchHardTripletLossDistanceFunction:
+ """This class defines distance functions, that can be used with Batch[All/Hard/SemiHard]TripletLoss"""
@staticmethod
def cosine_distance(embeddings: Tensor) -> Tensor:
+ """Compute the 2D matrix of cosine distances (1-co... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/BatchHardTripletLoss.py |
Help me document legacy Python code | from __future__ import annotations
import json
import logging
from pathlib import Path
from transformers.configuration_utils import PretrainedConfig
from sentence_transformers.backend.utils import _save_pretrained_wrapper, backend_should_export, backend_warn_to_save
logger = logging.getLogger(__name__)
def load_o... | --- +++ @@ -12,6 +12,16 @@
def load_onnx_model(model_name_or_path: str, config: PretrainedConfig, task_name: str, **model_kwargs):
+ """
+ Load and perhaps export an ONNX model using the Optimum library.
+
+ Args:
+ model_name_or_path (str): The model name on Hugging Face (e.g. 'naver/splade-coconde... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/backend/load.py |
Document this code for team use | from __future__ import annotations
import copy
import math
import random
from collections.abc import Iterable
import numpy as np
import torch
from torch import Tensor, nn
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_tra... | --- +++ @@ -15,6 +15,114 @@
class ContrastiveTensionLoss(nn.Module):
+ """
+ This loss expects only single sentences, without any labels. Positive and negative pairs are automatically created via random sampling,
+ such that a positive pair consists of two identical sentences and a negative pair consists o... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/ContrastiveTensionLoss.py |
Help me add docstrings to my project | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
class BatchHardSoftMarginTripletLoss(... | --- +++ @@ -14,6 +14,77 @@ def __init__(
self, model: SentenceTransformer, distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance
) -> None:
+ """
+ BatchHardSoftMarginTripletLoss takes a batch with (sentence, label) pairs and computes the loss for all possible, valid
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/BatchHardSoftMarginTripletLoss.py |
Generate documentation strings for clarity | from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
class BatchAllTripletLoss(nn.Module):
def ... | --- +++ @@ -16,6 +16,74 @@ distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance,
margin: float = 5,
) -> None:
+ """
+ BatchAllTripletLoss takes a batch with (sentence, label) pairs and computes the loss for all possible, valid
+ triplets, i.e., anchor and p... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/BatchAllTripletLoss.py |
Replace inline comments with docstrings | from __future__ import annotations
import logging
import os
import shutil
import tempfile
from collections.abc import Callable
from fnmatch import fnmatch
from pathlib import Path
from typing import TYPE_CHECKING, Any
import huggingface_hub
from huggingface_hub import list_repo_files
if TYPE_CHECKING:
from sente... | --- +++ @@ -19,6 +19,16 @@
def _save_pretrained_wrapper(_save_pretrained_fn: Callable, subfolder: str) -> Callable[..., None]:
+ """
+ Wraps the save_pretrained method of a model to save to a subfolder.
+
+ Args:
+ _save_pretrained_fn: The original save_pretrained function
+ subfolder: The su... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/backend/utils.py |
Create documentation strings for testing functions | # Copyright 2026 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s... | --- +++ @@ -28,6 +28,17 @@
def load_yaml_file(file_path: Union[str, Path]) -> Any:
+ """Loads a YAML file and returns its content.
+
+ Args:
+ file_path: Path to the YAML file.
+
+ Returns:
+ The content of the YAML file.
+
+ Raises:
+ FileNotFoundError: If the file_path does not exist.
+ """
file_... | https://raw.githubusercontent.com/google/adk-python/HEAD/src/google/adk/utils/yaml_utils.py |
Generate NumPy-style docstrings | from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
EUCLIDEAN = lambda x, y... | --- +++ @@ -11,6 +11,7 @@
class SiameseDistanceMetric(Enum):
+ """The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
@@ -25,6 +26,58 @@ margin: float = 0.5,
size_average: bool = True,
... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/ContrastiveLoss.py |
Add clean documentation to messy code | from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .BatchHardTripletLoss import BatchHardTripletLossDistanceFunction
class BatchSemiHardTripletLoss(nn.Module):
def __in... | --- +++ @@ -17,6 +17,83 @@ distance_metric=BatchHardTripletLossDistanceFunction.eucledian_distance,
margin: float = 5,
) -> None:
+ """
+ BatchSemiHardTripletLoss takes a batch with (label, sentence) pairs and computes the loss for all possible, valid
+ triplets, i.e., anchor ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/BatchSemiHardTripletLoss.py |
Add docstrings that explain purpose and usage | from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import fullname
class CosineSimilarityLoss(nn.Module):
def __init__(... | --- +++ @@ -17,6 +17,59 @@ loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
+ """
+ CosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the
+ vectors ``u = model(sentence_A)... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/CosineSimilarityLoss.py |
Help me document legacy Python code | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Literal
from sentence_transformers.backend.utils import save_or_push_to_hub_model
from sentence_transformers.util import disable_datasets_caching, is_datasets_available
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from... | --- +++ @@ -29,6 +29,36 @@ create_pr: bool = False,
file_suffix: str | None = None,
) -> None:
+ """
+ Export a quantized ONNX model from a SentenceTransformer, SparseEncoder, or CrossEncoder model.
+
+ This function applies dynamic quantization, i.e. without a calibration dataset.
+ Each of the d... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/backend/quantize.py |
Add docstrings that explain inputs and outputs | from __future__ import annotations
from collections.abc import Iterable, Iterator
from contextlib import nullcontext
from functools import partial
from typing import Any, Literal
import torch
import tqdm
from torch import Tensor, nn
from torch.utils.checkpoint import get_device_states, set_device_states
from transfor... | --- +++ @@ -17,6 +17,12 @@
class RandContext:
+ """
+ Random-state context manager class. Reference: https://github.com/luyug/GradCache.
+
+ This class will back up the pytorch's random state during initialization. Then when the context is activated,
+ the class will set up the random state with the bac... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/CachedGISTEmbedLoss.py |
Create docstrings for all classes and functions | from __future__ import annotations
import logging
import random
from collections.abc import Iterable, Sequence
from typing import Any
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.losses import (
CachedGISTEmbedLoss,
CachedMultipleNegativesRankingLoss,
... | --- +++ @@ -31,6 +31,13 @@
class ForwardDecorator:
+ """
+ This decorator is used to cache the output of the Sentence Transformer's forward pass,
+ so that it can be shrank and reused for multiple loss calculations. This prevents the
+ model from recalculating the embeddings for each desired Matryoshka ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/MatryoshkaLoss.py |
Insert docstrings into my code | from __future__ import annotations
import logging
import math
import os
import queue
import tempfile
import traceback
from collections.abc import Callable
from multiprocessing import Queue
from pathlib import Path
from typing import Any, Literal, overload
import numpy as np
import torch
import torch.multiprocessing a... | --- +++ @@ -52,6 +52,74 @@
class CrossEncoder(nn.Module, PushToHubMixin, FitMixin):
+ """
+ A CrossEncoder takes exactly two sentences / texts as input and either predicts
+ a score or label for this sentence pair. It can for example predict the similarity of the sentence pair
+ on a scale of 0 ... 1.
+... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/CrossEncoder.py |
Add docstrings including usage examples | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
def __init__(self):
self.greater_is_better = T... | --- +++ @@ -11,6 +11,17 @@
class SentenceEvaluator:
+ """
+ Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
+ attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
+ for choosing the best c... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/evaluation/SentenceEvaluator.py |
Document functions with detailed explanations | from __future__ import annotations
import json
import logging
import random
import re
from collections import Counter, defaultdict
from copy import copy
from dataclasses import dataclass, field, fields
from pathlib import Path
from platform import python_version
from pprint import pformat
from textwrap import indent
f... | --- +++ @@ -264,6 +264,47 @@
@dataclass
class SentenceTransformerModelCardData(CardData):
+ """A dataclass storing data used in the model card.
+
+ Args:
+ language (`Optional[Union[str, List[str]]]`): The model language, either a string or a list,
+ e.g. "en" or ["en", "de", "nl"]
+ l... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/model_card.py |
Add docstrings for production code | from __future__ import annotations
import torch
from torch import Tensor
from sentence_transformers.models.Module import Module
class Pooling(Module):
POOLING_MODES = (
"cls",
"lasttoken",
"max",
"mean",
"mean_sqrt_len_tokens",
"weightedmean",
)
config_k... | --- +++ @@ -7,6 +7,38 @@
class Pooling(Module):
+ """
+ Performs pooling (max or mean) on the token embeddings.
+
+ Using pooling, it generates from a variable sized sentence a fixed sized sentence embedding. This layer also allows
+ to use the CLS token if it is returned by the underlying word embeddin... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/Pooling.py |
Add clean documentation to messy code | from __future__ import annotations
import json
import os
from pathlib import Path
try:
from typing import Self
except ImportError:
from typing_extensions import Self
from torch import Tensor, nn
from transformers.utils import logging
from sentence_transformers.models.InputModule import InputModule
from sent... | --- +++ @@ -27,6 +27,147 @@ def __init__(
self, sub_modules: dict[str, list[Module]], default_route: str | None = None, allow_empty_key: bool = True
) -> None:
+ r"""
+ This model allows to create asymmetric SentenceTransformer models that apply different modules depending on the specifi... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/Router.py |
Create documentation for each function signature | from __future__ import annotations
import csv
import logging
import os
from typing import TYPE_CHECKING
import torch
from torch.utils.data import DataLoader
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
from sentence_transformers.util import batch_to_device
if TYPE_CHECKING:
f... | --- +++ @@ -18,8 +18,21 @@
class LabelAccuracyEvaluator(SentenceEvaluator):
+ """
+ Evaluate a model based on its accuracy on a labeled dataset
+
+ This requires a model with LossFunction.SOFTMAX
+
+ The results are written in a CSV. If a CSV already exists, then values are appended.
+ """
def... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/evaluation/LabelAccuracyEvaluator.py |
Fill in missing docstrings in my code | from __future__ import annotations
import json
import os
from abc import ABC, abstractmethod
from typing import Any
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from safetensors.torch import load_file as load_safetensors_file
from safetensors.torch import l... | --- +++ @@ -19,6 +19,37 @@
class Module(ABC, torch.nn.Module):
+ """
+ Base class for all modules in the Sentence Transformers library.
+
+ This class provides a common interface for all modules, including methods for loading and saving the module's
+ configuration and weights. It also provides a method... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/Module.py |
Help me comply with documentation standards | from __future__ import annotations
import random
import warnings
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses.CachedGISTEmbedLoss impor... | --- +++ @@ -19,6 +19,12 @@
class TransformerDecorator:
+ """
+ Decorator that caches the embeddings of all layers of the transformer.
+ When `layer_idx` is set, it returns the cached embeddings of that layer instead.
+
+ This is meant to override the forward function of the Transformer.
+ """
... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/AdaptiveLayerLoss.py |
Document this module using docstrings | from __future__ import annotations
import json
import logging
import os
import shutil
from collections.abc import Callable, Iterable
from pathlib import Path
from typing import TYPE_CHECKING, Any
import numpy as np
import torch
import transformers
from packaging import version
from torch import Tensor, nn
from torch.... | --- +++ @@ -42,6 +42,15 @@
class SaveModelCallback(TrainerCallback):
+ """A Callback to save the model to the `output_dir`.
+
+ There are two cases:
+ 1. save_best_model is True and evaluator is defined:
+ We save on evaluate, but only if the new model is better than the currently saved one
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/fit_mixin.py |
Document all public functions with docstrings |
from __future__ import annotations
import gzip
import os
from . import InputExample
class NLIDataReader:
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + file... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/readers/NLIDataReader.py |
Write reusable docstrings | from __future__ import annotations
from typing_extensions import deprecated
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder.evaluation.classification import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.evaluation.correlation import CrossEncoderCo... | --- +++ @@ -14,6 +14,9 @@ "evaluation. It accepts approximately the same inputs as this evaluator."
)
class CEBinaryAccuracyEvaluator(CrossEncoderClassificationEvaluator):
+ """
+ This evaluator has been deprecated in favor of the more general CrossEncoderClassificationEvaluator.
+ """
@classmetho... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/cross_encoder/evaluation/deprecated.py |
Generate docstrings for this script | from __future__ import annotations
from collections.abc import Iterable
from typing import Any
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: Senten... | --- +++ @@ -12,6 +12,67 @@
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
+ """
+ This class implements CoSENT (Consistent SENTence embedding) loss.
+ It expects that each of the InputExamples co... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/CoSENTLoss.py |
Document functions with detailed explanations | from __future__ import annotations
from collections.abc import Iterable
from typing import Any, Literal
import torch
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import cos_sim
class GlobalOrthogonalRegularizationLoss(nn.Modu... | --- +++ @@ -19,6 +19,106 @@ second_moment_weight: float | None = 1.0,
aggregation: Literal["mean", "sum"] = "mean",
) -> None:
+ """
+ Global Orthogonal Regularization (GOR) Loss that encourages embeddings to be well-distributed
+ in the embedding space by penalizing high mean... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/GlobalOrthogonalRegularizationLoss.py |
Generate consistent documentation across files |
from __future__ import annotations
import csv
import os
from . import InputExample
class TripletReader:
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/readers/TripletReader.py |
Add docstrings following best practices | from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.util import pairwise_cos_sim, pairwise... | --- +++ @@ -12,6 +12,7 @@
class TripletDistanceMetric(Enum):
+ """The metric for the triplet loss"""
COSINE = lambda x, y: 1 - pairwise_cos_sim(x, y)
EUCLIDEAN = lambda x, y: pairwise_euclidean_sim(x, y)
@@ -22,6 +23,57 @@ def __init__(
self, model: SentenceTransformer, distance_metric=T... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/TripletLoss.py |
Add docstrings to incomplete code | from __future__ import annotations
import logging
from collections.abc import Callable, Iterable, Iterator
from contextlib import nullcontext
from functools import partial
from typing import Any, Literal
import torch
import tqdm
from torch import Tensor, nn
from torch.utils.checkpoint import get_device_states, set_de... | --- +++ @@ -20,6 +20,12 @@
class RandContext:
+ """
+ Random-state context manager class. Reference: https://github.com/luyug/GradCache.
+
+ This class will back up the pytorch's random state during initialization. Then when the context is activated,
+ the class will set up the random state with the bac... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/losses/CachedMultipleNegativesRankingLoss.py |
Document functions with clear intent | from __future__ import annotations
import logging
import torch
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
logger = logging.getLogger(__name__)
class WordWeights(Module):
config_keys: list[str] = ["vocab", "word_weights", "unknown_word_weight"]
def __init__(self, ... | --- +++ @@ -11,10 +11,21 @@
class WordWeights(Module):
+ """This model can weight word embeddings, for example, with idf-values."""
config_keys: list[str] = ["vocab", "word_weights", "unknown_word_weight"]
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: flo... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/WordWeights.py |
Add clean documentation to messy code | from __future__ import annotations
import logging
import os
from abc import ABC, abstractmethod
from collections import defaultdict, deque
from collections.abc import Iterator
from itertools import accumulate, cycle
from typing import Any
import numpy as np
import torch
from torch.utils.data import BatchSampler, Conc... | --- +++ @@ -30,6 +30,10 @@
class SetEpochMixin:
+ """
+ Required for a BatchSampler as the Trainer will call set_epoch on the BatchSampler at the beginning of each epoch.
+ The BatchSampler can then set the generator seed accordingly.
+ """
def __init__(self, *args, **kwargs) -> None:
su... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sampler.py |
Generate NumPy-style docstrings | from __future__ import annotations
from collections.abc import Callable
from enum import Enum
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise... | --- +++ @@ -19,6 +19,14 @@
class SimilarityFunction(Enum):
+ """
+ Enum class for supported similarity functions. The following functions are supported:
+
+ - ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
+ - ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``, ``dot_product``): Dot product ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/similarity_functions.py |
Generate documentation strings for clarity | from __future__ import annotations
import logging
from collections.abc import Callable, Iterable, Iterator
from contextlib import contextmanager
from typing import Any, Literal
import numpy as np
import numpy.typing as npt
import torch
from torch import Tensor, nn
from tqdm import trange
from transformers import Auto... | --- +++ @@ -25,6 +25,108 @@
class SparseEncoder(SentenceTransformer):
+ """
+ Loads or creates a SparseEncoder model that can be used to map sentences / text to sparse embeddings.
+
+ Args:
+ model_name_or_path (str, optional): If it is a filepath on disk, it loads the model from that path. If it is... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/SparseEncoder.py |
Add docstrings that explain logic |
from __future__ import annotations
import csv
import gzip
import os
from . import InputExample
class STSDataReader:
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
score_col_idx=2,
delimiter="\t",
quoting=csv.QUOTE_NONE,
normalize... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/readers/STSDataReader.py |
Add verbose docstrings with examples | from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.S... | --- +++ @@ -13,6 +13,11 @@
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
+ """
+ :param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
+ :param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/losses/CSRLoss.py |
Add return value explanations in docstrings | from __future__ import annotations
import logging
import os
from collections import defaultdict
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
import torch
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import append_to_last_row,... | --- +++ @@ -22,6 +22,116 @@
class SparseInformationRetrievalEvaluator(InformationRetrievalEvaluator):
+ """
+ This evaluator extends :class:`~sentence_transformers.evaluation.InformationRetrievalEvaluator` but is specifically designed for sparse encoder models.
+
+ This class evaluates an Information Retri... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/evaluation/SparseInformationRetrievalEvaluator.py |
Help me add docstrings to my project | from __future__ import annotations
import logging
from collections.abc import Iterable, Iterator
from contextlib import nullcontext
from functools import partial
from typing import Any
import torch
import tqdm
from torch import Tensor, nn
from sentence_transformers.losses.CachedMultipleNegativesRankingLoss import Ra... | --- +++ @@ -32,6 +32,84 @@ mini_batch_size: int = 32,
show_progress_bar: bool = False,
):
+ """
+ Cached version of :class:`SpladeLoss` that uses the GradCache technique to allow for much larger
+ effective batch sizes without additional GPU memory usage.
+
+ By perform... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/losses/CachedSpladeLoss.py |
Write Python docstrings for this snippet | from __future__ import annotations
import logging
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses import FlopsLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class Splade... | --- +++ @@ -25,6 +25,82 @@ query_regularizer_threshold: int | None = None,
use_document_regularizer_only: bool = False,
):
+ """
+ SpladeLoss implements the loss function for the SPLADE (Sparse Lexical and Expansion) model,
+ which combines a main loss function with regulariza... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/losses/SpladeLoss.py |
Add docstrings to improve collaboration | from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.models.Module import Module
class TiedTranspose(nn.Module):
def __init__(self, linear: nn.... | --- +++ @@ -31,6 +31,24 @@
class SparseAutoEncoder(Module):
+ """
+ This module implements the Sparse AutoEncoder architecture based on the paper:
+ Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://huggingface.co/papers/2503.01776
+
+ This module transforms dense embeddin... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/models/SparseAutoEncoder.py |
Add docstrings explaining edge cases | from __future__ import annotations
import inspect
import logging
import os
import re
from collections import OrderedDict
from collections.abc import Callable
from contextlib import nullcontext
from functools import partial
from typing import TYPE_CHECKING, Any
import torch
from packaging.version import parse as parse... | --- +++ @@ -58,6 +58,74 @@
class SentenceTransformerTrainer(Trainer):
+ """
+ SentenceTransformerTrainer is a simple but feature-complete training and eval loop for PyTorch
+ based on the 🤗 Transformers :class:`~transformers.Trainer`.
+
+ This trainer integrates support for various :class:`transformers... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/trainer.py |
Document this code for team use | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.backend import load_onnx_model, load_openvino_model
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from transformers import AutoConfig, AutoMod... | --- +++ @@ -24,6 +24,34 @@
class MLMTransformer(InputModule):
+ """
+ MLMTransformer adapts a Masked Language Model (MLM) for sparse encoding applications.
+
+ This class extends the Transformer class to work specifically with models that have a
+ MLM head (like BERT, RoBERTa, etc.) and is designed to b... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/models/MLMTransformer.py |
Provide docstrings following PEP 257 | from __future__ import annotations
import logging
import time
from typing import TYPE_CHECKING, Any
import numpy as np
import torch
from tqdm import tqdm
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
try:
from elasticsearch import Elasticsearch
except ImportError:
pass
try:
... | --- +++ @@ -40,6 +40,24 @@ tuple[list[list[dict[str, int | float]]], float]
| tuple[list[list[dict[str, int | float]]], float, tuple[QdrantClient, str]]
):
+ """
+ Performs semantic search using sparse embeddings with Qdrant.
+
+ Args:
+ query_embeddings: PyTorch COO sparse tensor containing q... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/search_engines.py |
Provide clean and structured docstrings | from __future__ import annotations
import logging
import os
from collections.abc import Callable
from typing import Any
import torch
from packaging.version import parse as parse_version
from torch import nn
from transformers import EvalPrediction, PreTrainedTokenizerBase, TrainerCallback
from transformers import __ve... | --- +++ @@ -30,6 +30,73 @@
class SparseEncoderTrainer(SentenceTransformerTrainer):
+ """
+ SparseEncoderTrainer is a simple but feature-complete training and eval loop for PyTorch
+ based on the SentenceTransformerTrainer that based on 🤗 Transformers :class:`~transformers.Trainer`.
+
+ This trainer int... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/trainer.py |
Annotate my code with docstrings | from __future__ import annotations
import logging
from typing import Literal
import torch
from sentence_transformers.models.Module import Module
logger = logging.getLogger(__name__)
class SpladePooling(Module):
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
config_ke... | --- +++ @@ -11,6 +11,32 @@
class SpladePooling(Module):
+ """
+ SPLADE Pooling module for creating the sparse embeddings.
+
+ This module implements the SPLADE pooling mechanism that:
+
+ 1. Takes token logits from a masked language model (MLM).
+ 2. Applies a sparse transformation using an activatio... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/models/SpladePooling.py |
Document my Python code with docstrings | from __future__ import annotations
import inspect
import logging
import math
import os
from pathlib import Path
from typing import Any
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import numpy as np
import torch
from safetensors.torch import save_file as save_safetensor... | --- +++ @@ -33,6 +33,50 @@ embedding_dim: int | None = None,
**kwargs,
) -> None:
+ """
+ Initializes the StaticEmbedding model given a tokenizer. The model is a simple embedding bag model that
+ takes the mean of trained per-token embeddings to compute text embeddings.
+
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/StaticEmbedding.py |
Write beginner-friendly docstrings | from __future__ import annotations
import torch
import torch.distributed as dist
from transformers.utils import logging
# NOTE: transformers wraps the regular logging module for e.g. warning_once
logger = logging.get_logger(__name__)
def all_gather(tensor: torch.Tensor, with_grad: bool = False) -> torch.Tensor:
... | --- +++ @@ -9,6 +9,18 @@
def all_gather(tensor: torch.Tensor, with_grad: bool = False) -> torch.Tensor:
+ """
+ Gathers a tensor from each distributed rank into a list. Always retains gradients for the local rank's tensor,
+ and optionally retains gradients for the gathered tensors if `with_grad` is True.
... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/distributed.py |
Improve documentation using docstrings | from __future__ import annotations
import importlib
import logging
import os
from importlib.metadata import PackageNotFoundError, metadata
import torch
from transformers import is_torch_npu_available
logger = logging.getLogger(__name__)
def get_device_name() -> str:
if torch.cuda.is_available():
if "LO... | --- +++ @@ -12,6 +12,15 @@
def get_device_name() -> str:
+ """
+ Returns the name of the device where this module is running on.
+
+ This function only supports single device or basic distributed training setups.
+ In distributed mode for cuda device, it uses the rank to assign a specific CUDA device.
+... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/environment.py |
Generate docstrings with examples | from __future__ import annotations
import logging
import time
from typing import TYPE_CHECKING, Literal
import numpy as np
from torch import Tensor
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
import faiss
import usearch
def semantic_search_faiss(
query_embeddings: np.ndarray,
corpus_em... | --- +++ @@ -28,6 +28,67 @@ exact: bool = True,
output_index: bool = False,
) -> tuple[list[list[dict[str, int | float]]], float, faiss.Index]:
+ """
+ Performs semantic search using the FAISS library.
+
+ Rescoring will be performed if:
+ 1. `rescore` is True
+ 2. The query embeddings are not q... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/quantization.py |
Create simple docstrings for beginners | from __future__ import annotations
from functools import wraps
from transformers.integrations.peft import PeftAdapterMixin as PeftAdapterMixinTransformers
def peft_wrapper(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self.check_peft_compatible_model()
method = getattr(self.transf... | --- +++ @@ -6,6 +6,7 @@
def peft_wrapper(func):
+ """Wrapper to call the method on the auto_model with a check for PEFT compatibility."""
@wraps(func)
def wrapper(self, *args, **kwargs):
@@ -17,6 +18,15 @@
class PeftAdapterMixin:
+ """
+ Wrapper Mixin that adds the functionality to easily l... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/peft_mixin.py |
Add docstrings with type hints explained | from __future__ import annotations
import os
from pathlib import Path
from huggingface_hub import hf_hub_download, snapshot_download
from tqdm.autonotebook import tqdm
class disabled_tqdm(tqdm):
def __init__(self, *args, **kwargs):
kwargs["disable"] = True
super().__init__(*args, **kwargs)
... | --- +++ @@ -8,12 +8,18 @@
class disabled_tqdm(tqdm):
+ """
+ Class to override `disable` argument in case progress bars are globally disabled.
+
+ Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
+ """
def __init__(self, *args, **kwargs):
kwargs["disable"] = Tru... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/file_io.py |
Help me add docstrings to my project | from __future__ import annotations
import numpy as np
import torch
from sklearn.metrics import pairwise_distances
from torch import Tensor
from transformers.utils import logging
from .tensor import _convert_to_batch_tensor, _convert_to_tensor, normalize_embeddings, to_scipy_coo
# NOTE: transformers wraps the regular... | --- +++ @@ -13,10 +13,30 @@
def pytorch_cos_sim(a: Tensor, b: Tensor) -> Tensor:
+ """
+ Computes the cosine similarity between two tensors.
+
+ Args:
+ a (Union[list, np.ndarray, Tensor]): The first tensor.
+ b (Union[list, np.ndarray, Tensor]): The second tensor.
+
+ Returns:
+ Te... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/similarity.py |
Write clean docstrings for readability | from __future__ import annotations
from typing import Any, overload
import numpy as np
import torch
from scipy.sparse import coo_matrix
from torch import Tensor, device
def _convert_to_tensor(a: list | np.ndarray | Tensor) -> Tensor:
if isinstance(a, list):
# Check if list contains sparse tensors
... | --- +++ @@ -9,6 +9,16 @@
def _convert_to_tensor(a: list | np.ndarray | Tensor) -> Tensor:
+ """
+ Converts the input `a` to a PyTorch tensor if it is not already a tensor.
+ Handles lists of sparse tensors by stacking them.
+
+ Args:
+ a (Union[list, np.ndarray, Tensor]): The input array or tenso... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/tensor.py |
Generate consistent documentation across files |
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader:
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.o... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/readers/PairedFilesReader.py |
Add docstrings for better understanding | from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
save_in_root: bool =... | --- +++ @@ -11,6 +11,43 @@
class InputModule(Module):
+ """
+ Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
+ Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
+ in the forward pass.
+
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/InputModule.py |
Write Python docstrings for this snippet | from __future__ import annotations
import json
import logging
import os
from typing import TYPE_CHECKING
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from transformers import AutoTokenizer
from sentence_transformers.models.InputModule import InputModule
i... | --- +++ @@ -22,6 +22,23 @@
class SparseStaticEmbedding(InputModule):
+ """
+ SparseStaticEmbedding module for efficient sparse representations.
+
+ This lightweight module computes sparse representations by mapping input tokens to static weights,
+ such as IDF (Inverse Document Frequency) weights. It is... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/models/SparseStaticEmbedding.py |
Annotate my code with docstrings | from __future__ import annotations
import csv
import importlib
import logging
from contextlib import contextmanager
def fullname(o) -> str:
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__ # Avoid reporting __builtin__
else:
... | --- +++ @@ -7,6 +7,25 @@
def fullname(o) -> str:
+ """
+ Gives a full name (package_name.class_name) for a class / object in Python. Will
+ be used to load the correct classes from JSON files
+
+ Args:
+ o: The object for which to get the full name.
+
+ Returns:
+ str: The full name of ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/misc.py |
Add documentation for all methods | from __future__ import annotations
import logging
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Union
from packaging.version import parse as parse_version
from transformers import TrainingArguments as TransformersTrainingArguments
from transformers import __version__... | --- +++ @@ -17,6 +17,73 @@
class BatchSamplers(ExplicitEnum):
+ """
+ Stores the acceptable string identifiers for batch samplers.
+
+ The batch sampler is responsible for determining how samples are grouped into batches during training.
+ Valid options are:
+
+ - ``BatchSamplers.BATCH_SAMPLER``: **[... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/training_args.py |
Add docstrings to existing functions | from __future__ import annotations
import csv
import json
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score, ndcg_score
from tqdm import tqdm
from sentence_transformers.evaluation.SentenceEvaluator import SentenceEvaluator
logger = logging.getLogger(__name__)
class Re... | --- +++ @@ -15,6 +15,31 @@
class ReciprocalRankFusionEvaluator(SentenceEvaluator):
+ """
+ This class evaluates a hybrid search approach using Reciprocal Rank Fusion (RRF).
+
+ Given a query and two separate ranked lists of documents from different retrievers (e.g., sparse and dense),
+ it combines them... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/evaluation/ReciprocalRankFusionEvaluator.py |
Add docstrings for production code | from __future__ import annotations
import logging
from enum import Enum
from transformers.trainer_callback import TrainerCallback, TrainerControl, TrainerState
from sentence_transformers.sparse_encoder.losses.SpladeLoss import SpladeLoss
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTra... | --- +++ @@ -12,6 +12,7 @@
class SchedulerType(Enum):
+ """Types of schedulers for weight parameters in SpladeLoss"""
LINEAR = "linear"
QUADRATIC = "quadratic"
@@ -24,6 +25,18 @@ scheduler_type: str | SchedulerType = SchedulerType.QUADRATIC,
warmup_ratio: float = 1 / 3,
):
+ ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/sparse_encoder/callbacks/splade_callbacks.py |
Write proper docstrings for these functions | from __future__ import annotations
import inspect
import logging
import os
from collections.abc import Callable
from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING, Any
from sentence_transformers.backend import load_onnx_model, load_openvino_model
try:
from typing impo... | --- +++ @@ -68,6 +68,28 @@
class Transformer(InputModule):
+ """Hugging Face AutoModel to generate token embeddings.
+ Loads the correct class, e.g. BERT / RoBERTa etc.
+
+ Args:
+ model_name_or_path: Hugging Face models name
+ (https://huggingface.co/models)
+ max_seq_length: Trun... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/models/Transformer.py |
Write proper docstrings for these functions | from __future__ import annotations
import heapq
import logging
import queue
from collections.abc import Callable
from typing import TYPE_CHECKING
import numpy as np
import torch
from torch import Tensor
from tqdm.autonotebook import tqdm
from .similarity import cos_sim
from .tensor import normalize_embeddings
logge... | --- +++ @@ -34,6 +34,36 @@ prompt_name: str | None = None,
prompt: str | None = None,
) -> list[list[float | int]]:
+ """
+ Given a list of sentences / texts, this function performs paraphrase mining. It compares all sentences against all
+ other sentences and returns a list with the pairs that have ... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/util/retrieval.py |
Write documentation strings for class attributes |
import json
import os
from datetime import datetime, timezone
from pathlib import Path
from cli_anything.anygen.utils.anygen_backend import (
DOWNLOADABLE_OPERATIONS,
VALID_OPERATIONS,
create_task as _api_create,
download_file as _api_download,
download_thumbnail as _api_thumbnail,
poll_task a... | --- +++ @@ -1,3 +1,7 @@+"""Task management — create, query, poll, download AnyGen tasks.
+
+Thin wrappers around the backend that add local task history persistence.
+"""
import json
import os
@@ -36,6 +40,7 @@
def list_task_records(limit: int = 20, status_filter: str | None = None) -> list[dict]:
+ """List ... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/anygen/agent-harness/cli_anything/anygen/core/task.py |
Annotate my code with docstrings | #!/usr/bin/env python3
import sys
import os
import json
import click
from typing import Optional
# Add parent to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cli_anything.audacity.core.session import Session
from cli_anything.audacity.core import project as pr... | --- +++ @@ -1,4 +1,20 @@ #!/usr/bin/env python3
+"""Audacity CLI — A stateful command-line interface for audio editing.
+
+This CLI provides full audio editing capabilities using Python stdlib
+(wave, struct, math) as the backend engine, with a JSON project format
+that tracks tracks, clips, effects, labels, and histor... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/audacity/agent-harness/cli_anything/audacity/audacity_cli.py |
Document my Python code with docstrings | #!/usr/bin/env python3
import sys
import os
import json
import click
from typing import Optional
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cli_anything.anygen.core.session import Session
from cli_anything.anygen.core import task as task_mod
from cli_anything.anygen.core imp... | --- +++ @@ -1,4 +1,15 @@ #!/usr/bin/env python3
+"""AnyGen CLI — Generate docs, slides, websites and more via AnyGen cloud API.
+
+Usage:
+ # One-shot commands
+ cli-anything-anygen task run --operation slide --prompt "AI trends presentation" --output ./
+ cli-anything-anygen task create --operation doc --prom... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/anygen/agent-harness/cli_anything/anygen/anygen_cli.py |
Annotate my code with docstrings |
from __future__ import annotations
import os
from . import InputExample
class LabelSentenceReader:
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx ... | --- +++ @@ -1,3 +1,12 @@+"""
+This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
+It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
+
+Nowadays, with Sentence Transformers v3+, it is r... | https://raw.githubusercontent.com/huggingface/sentence-transformers/HEAD/sentence_transformers/readers/LabelSentenceReader.py |
Write docstrings for backend logic |
import os
import wave
from typing import Dict, Any, List, Optional
def import_audio(path: str) -> Dict[str, Any]:
if not os.path.exists(path):
raise FileNotFoundError(f"Audio file not found: {path}")
abs_path = os.path.abspath(path)
info = {
"source": abs_path,
"filename": os.pat... | --- +++ @@ -1,3 +1,10 @@+"""Audacity CLI - Clip management module.
+
+Handles importing audio files, adding clips to tracks, trimming,
+splitting, moving, and removing clips. Each clip references a source
+audio file and has start/end times on the track timeline plus
+trim offsets within the source.
+"""
import os
... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/audacity/agent-harness/cli_anything/audacity/core/clips.py |
Add docstrings that explain logic |
import json
import os
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
def _locked_save_json(path, data, **dump_kwargs) -> None:
try:
f = open(path, "r+")
except FileNotFoundError:
os.makedirs(os.path.dirname(os.path.abspath(path)), exi... | --- +++ @@ -1,3 +1,4 @@+"""Session management — undo/redo and command history for AnyGen CLI."""
import json
import os
@@ -7,6 +8,7 @@
def _locked_save_json(path, data, **dump_kwargs) -> None:
+ """Atomically write JSON with exclusive file locking."""
try:
f = open(path, "r+")
except FileN... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/anygen/agent-harness/cli_anything/anygen/core/session.py |
Document classes and their methods |
from __future__ import annotations
import json
import os
import sys
import time
import base64
from datetime import datetime
from pathlib import Path
from typing import Callable
try:
import requests
except ImportError:
print(
"requests library not found. Install with: pip3 install requests",
f... | --- +++ @@ -1,3 +1,8 @@+"""AnyGen API backend — wraps the AnyGen OpenAPI for task lifecycle management.
+
+This module handles all HTTP communication with the AnyGen cloud service:
+create tasks, poll status, upload files, download results.
+"""
from __future__ import annotations
@@ -37,6 +42,7 @@ # ── Config ────... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/anygen/agent-harness/cli_anything/anygen/utils/anygen_backend.py |
Add structured docstrings to improve clarity |
import os
import sys
# ── ANSI color codes (no external deps for core styling) ──────────────
_RESET = "\033[0m"
_BOLD = "\033[1m"
_DIM = "\033[2m"
_ITALIC = "\033[3m"
_UNDERLINE = "\033[4m"
# Brand colors
_CYAN = "\033[38;5;80m" # cli-anything brand cyan
_CYAN_BG = "\033[48;5;80m"
_WHITE = "\033[97m"
_GRAY =... | --- +++ @@ -1,3 +1,22 @@+"""cli-anything REPL Skin — Unified terminal interface for all CLI harnesses.
+
+Copy this file into your CLI package at:
+ cli_anything/<software>/utils/repl_skin.py
+
+Usage:
+ from cli_anything.<software>.utils.repl_skin import ReplSkin
+
+ skin = ReplSkin("shotcut", version="1.0.0"... | https://raw.githubusercontent.com/HKUDS/CLI-Anything/HEAD/anygen/agent-harness/cli_anything/anygen/utils/repl_skin.py |