sample_id
string
instruction
string
completion
string
category
string
label
int64
metadata
string
run-llama/llama_index:llama-index-core/llama_index/core/chat_engine/multi_modal_context.py:MultiModalContextChatEngine.from_defaults
# Context: from typing import Any, List, Optional, Sequence, Tuple, Union from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, MessageRole, ) from llama_index.core.llms import LLM, TextBlock, ChatMessage, ImageBlock from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts import PromptTemplate from llama_index.core.memory import BaseMemory, Memory from llama_index.core.settings import Settings from llama_index.core.base.base_retriever import BaseRetriever def _get_image_and_text_nodes(nodes: List[NodeWithScore]) -> Tuple[List[NodeWithScore], List[NodeWithScore]]: ... def _ensure_query_bundle(str_or_query_bundle: QueryType) -> QueryBundle: ... class MultiModalContextChatEngine(BaseChatEngine): def __init__( self, retriever: BaseRetriever, multi_modal_llm: LLM, memory: BaseMemory, system_prompt: str, context_template: Optional[Union[str, PromptTemplate]] = None, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ) -> None: self._retriever = retriever self._multi_modal_llm = multi_modal_llm context_template = context_template or DEFAULT_TEXT_QA_PROMPT if isinstance(context_template, str): context_template = PromptTemplate(context_template) self._context_template = context_template self._memory = memory self._system_prompt = system_prompt self._node_postprocessors = node_postprocessors or [] self.callback_manager = callback_manager or CallbackManager([]) for node_postprocessor in self._node_postprocessors: node_postprocessor.callback_manager = self.callback_manager def _apply_node_postprocessors(self, nodes: List[NodeWithScore], query_bundle: QueryBundle) -> List[NodeWithScore]: ... def _get_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]: ... async def _aget_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]: ... 
def synthesize(self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]], streaming: bool) -> RESPONSE_TYPE: ... async def asynthesize(self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]], streaming: bool) -> RESPONSE_TYPE: ... def chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> AgentChatResponse: ... def stream_chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> StreamingAgentChatResponse: ... async def achat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> AgentChatResponse: ... async def astream_chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> StreamingAgentChatResponse: ... def reset(self) -> None: ... def chat_history(self) -> List[ChatMessage]: ... # Task: Write a Python method `from_defaults` for the class `MultiModalContextChatEngine` to initialize a MultiModalContextChatEngine from default parameters. Parameters: retriever: BaseRetriever, chat_history: Optional[List[ChatMessage]], memory: Optional[BaseMemory], system_prompt: Optional[str], node_postprocessors: Optional[List[BaseNodePostprocessor]], context_template: Optional[Union[str, PromptTemplate]], multi_modal_llm: Optional[LLM] Returns: 'MultiModalContextChatEngine'
def from_defaults( cls, retriever: BaseRetriever, chat_history: Optional[List[ChatMessage]] = None, memory: Optional[BaseMemory] = None, system_prompt: Optional[str] = None, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, context_template: Optional[Union[str, PromptTemplate]] = None, multi_modal_llm: Optional[LLM] = None, **kwargs: Any, ) -> "MultiModalContextChatEngine": """Initialize a MultiModalContextChatEngine from default parameters.""" multi_modal_llm = multi_modal_llm or Settings.llm chat_history = chat_history or [] memory = memory or Memory.from_defaults( chat_history=chat_history, token_limit=multi_modal_llm.metadata.context_window - 256, ) system_prompt = system_prompt or "" node_postprocessors = node_postprocessors or [] return cls( retriever, multi_modal_llm=multi_modal_llm, memory=memory, system_prompt=system_prompt, node_postprocessors=node_postprocessors, callback_manager=Settings.callback_manager, context_template=context_template, )
function_simple
1
{"cognitive_complexity": 5, "loc": 32, "code_loc": 17, "docstring_loc": 1, "function_name": "from_defaults", "class_name": "MultiModalContextChatEngine", "qualname": "MultiModalContextChatEngine.from_defaults", "file_path": "llama-index-core/llama_index/core/chat_engine/multi_modal_context.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py:license_header
Add a Apache-2.0 license header comment for the project 'transformers', authored by The Google AI Language Team Authors and The HuggingFace Inc, year 2018.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
license
0
{"license_type": "Apache-2.0", "author": "The Google AI Language Team Authors and The HuggingFace Inc", "year": "2018", "source": "header", "repo_id": "huggingface/transformers"}
ray-project/ray:python/ray/serve/tests/test_direct_ingress.py:test_get_serve_instance_details_json_serializable
# Context: import json import pytest import ray from ray import serve from ray.serve._private.constants import ( DEFAULT_AUTOSCALING_POLICY_NAME, HEALTHY_MESSAGE, RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT, RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT, RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT, RAY_SERVE_DIRECT_INGRESS_PORT_RETRY_COUNT, RAY_SERVE_ENABLE_DIRECT_INGRESS, RAY_SERVE_ENABLE_HA_PROXY, SERVE_DEFAULT_APP_NAME, ) from ray.serve.autoscaling_policy import default_autoscaling_policy from ray.serve.context import _get_global_client from ray.serve.tests.conftest import TEST_GRPC_SERVICER_FUNCTIONS class Collector: ... def _skip_if_ff_not_enabled(): ... def _skip_if_haproxy_enabled(): ... def _shared_serve_instance(): ... def serve_instance(_shared_serve_instance): ... class Hybrid: ... def get_target_groups(app_name: str, from_proxy_manager: bool): ... def test_proxy_is_started_on_head_only_mode(_skip_if_ff_not_enabled, serve_instance): ... def get_http_ports(route_prefix, first_only): ... def get_grpc_ports(route_prefix, first_only): ... def test_basic(_skip_if_ff_not_enabled, serve_instance): ... def test_internal_server_error(_skip_if_ff_not_enabled, serve_instance): ... def test_fastapi_app(_skip_if_ff_not_enabled, serve_instance): ... def test_http_request_id(_skip_if_ff_not_enabled, serve_instance, use_fastapi: bool): ... def test_grpc_request_id(_skip_if_ff_not_enabled, serve_instance): ... def test_multiplexed_model_id(_skip_if_ff_not_enabled, serve_instance): ... def test_health_check(_skip_if_ff_not_enabled, serve_instance): ... def test_port_retry_logic(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_gives_up_after_max_port_retries_for_http(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_gives_up_after_max_port_retries_for_grpc(_skip_if_ff_not_enabled, serve_instance): ... def test_no_port_available(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_releases_ports_on_shutdown(_skip_if_ff_not_enabled, serve_instance): ... 
def test_get_serve_instance_details(_skip_if_ff_not_enabled, serve_instance): ... def test_only_ingress_deployment_replicas_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ... def test_crashed_replica_port_is_released_and_reused(_skip_if_ff_not_enabled, serve_instance): ... def test_multiple_applications_on_same_node(_skip_if_ff_not_enabled, serve_instance): ... def test_app_with_composite_deployments(_skip_if_ff_not_enabled, serve_instance): ... def test_only_running_apps_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ... def test_some_replicas_not_running(_skip_if_ff_not_enabled, serve_instance): ... def test_port_recovery_on_controller_restart(_skip_if_ff_not_enabled, serve_instance): ... class TestDirectIngressBackpressure: ... class TestDirectIngressAutoscaling: ... def test_disconnect(_skip_if_ff_not_enabled, serve_instance): ... def test_context_propagation(_skip_if_ff_not_enabled, serve_instance): ... def test_context_propagation_with_child(_skip_if_ff_not_enabled, serve_instance): ... def test_shutdown_replica_only_after_draining_requests(_skip_if_ff_not_enabled, serve_instance): ... def test_http_routes_endpoint(_skip_if_ff_not_enabled, serve_instance): ... def test_grpc_list_applications_endpoint(_skip_if_ff_not_enabled, _skip_if_haproxy_enabled, serve_instance): ... def test_redeploy_start_time(_skip_if_ff_not_enabled, serve_instance): ... def test_deploy_app_custom_exception(_skip_if_ff_not_enabled, serve_instance): ... def test_get_deployment_config(_skip_if_ff_not_enabled, serve_instance): ... def test_stuck_requests_are_force_killed(_skip_if_ff_not_enabled, serve_instance): ... # Task: Write a Python test function `test_get_serve_instance_details_json_serializable` to test the result from get_serve_instance_details is json serializable. Module under test: concurrent.futures, typing, uuid
def test_get_serve_instance_details_json_serializable( _skip_if_ff_not_enabled, serve_instance, policy_name ): """Test the result from get_serve_instance_details is json serializable.""" controller = _get_global_client()._controller autoscaling_config = { "min_replicas": 1, "max_replicas": 10, "_policy": {"name": policy_name}, } if policy_name is None: autoscaling_config.pop("_policy") @serve.deployment(autoscaling_config=autoscaling_config) def autoscaling_app(): return "1" serve.run(autoscaling_app.bind()) details = ray.get(controller.get_serve_instance_details.remote()) details_json = json.dumps(details) controller_details = ray.get(controller.get_actor_details.remote()) node_id = controller_details.node_id node_ip = controller_details.node_ip node_instance_id = controller_details.node_instance_id proxy_details = ray.get(controller.get_proxy_details.remote(node_id=node_id)) deployment_timestamp = ray.get( controller.get_deployment_timestamps.remote(app_name="default") ) deployment_details = ray.get( controller.get_deployment_details.remote("default", "autoscaling_app") ) replica = deployment_details.replicas[0] expected_json = json.dumps( { "controller_info": { "node_id": node_id, "node_ip": node_ip, "node_instance_id": node_instance_id, "actor_id": controller_details.actor_id, "actor_name": controller_details.actor_name, "worker_id": controller_details.worker_id, "log_file_path": controller_details.log_file_path, }, "proxy_location": "HeadOnly", "http_options": {"host": "0.0.0.0"}, "grpc_options": { "port": 9000, "grpc_servicer_functions": TEST_GRPC_SERVICER_FUNCTIONS, }, "proxies": { node_id: { "node_id": node_id, "node_ip": node_ip, "node_instance_id": node_instance_id, "actor_id": proxy_details.actor_id, "actor_name": proxy_details.actor_name, "worker_id": proxy_details.worker_id, "log_file_path": proxy_details.log_file_path, "status": proxy_details.status, } }, "applications": { "default": { "name": "default", "route_prefix": "/", "docs_path": None, 
"status": "RUNNING", "message": "", "last_deployed_time_s": deployment_timestamp, "deployed_app_config": None, "source": "imperative", "deployments": { "autoscaling_app": { "name": "autoscaling_app", "status": "HEALTHY", "status_trigger": "CONFIG_UPDATE_COMPLETED", "message": "", "deployment_config": { "name": "autoscaling_app", "max_ongoing_requests": 5, "max_queued_requests": -1, "user_config": None, "autoscaling_config": { "min_replicas": 1, "initial_replicas": None, "max_replicas": 10, "target_ongoing_requests": 2.0, "metrics_interval_s": 10.0, "look_back_period_s": 30.0, "smoothing_factor": 1.0, "upscale_smoothing_factor": None, "downscale_smoothing_factor": None, "upscaling_factor": None, "downscaling_factor": None, "downscale_delay_s": 600.0, "downscale_to_zero_delay_s": None, "upscale_delay_s": 30.0, "aggregation_function": "mean", "policy": { "policy_function": "ray.serve.autoscaling_policy:default_autoscaling_policy", "policy_kwargs": {}, }, }, "graceful_shutdown_wait_loop_s": 2.0, "graceful_shutdown_timeout_s": 20.0, "health_check_period_s": 10.0, "health_check_timeout_s": 30.0, "ray_actor_options": { "num_cpus": 1.0, }, "request_router_config": { "request_router_class": "ray.serve._private.request_router:PowerOfTwoChoicesRequestRouter", "request_router_kwargs": {}, "request_routing_stats_period_s": 10.0, "request_routing_stats_timeout_s": 30.0, }, }, "target_num_replicas": 1, "required_resources": {"CPU": 1}, "replicas": [ { "node_id": node_id, "node_ip": node_ip, "node_instance_id": node_instance_id, "actor_id": replica.actor_id, "actor_name": replica.actor_name, "worker_id": replica.worker_id, "log_file_path": replica.log_file_path, "replica_id": replica.replica_id, "state": "RUNNING", "pid": replica.pid, "start_time_s": replica.start_time_s, } ], } }, "external_scaler_enabled": False, "deployment_topology": { "app_name": "default", "nodes": { "autoscaling_app": { "name": "autoscaling_app", "app_name": "default", "outbound_deployments": [], 
"is_ingress": True, }, }, "ingress_deployment": "autoscaling_app", }, } }, "target_capacity": None, "target_groups": [ { "targets": [ { "ip": node_ip, "port": 8000 if RAY_SERVE_ENABLE_HA_PROXY else 30000, "instance_id": node_instance_id, "name": proxy_details.actor_name if RAY_SERVE_ENABLE_HA_PROXY else replica.actor_name, }, ], "route_prefix": "/", "protocol": "HTTP", "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default", }, { "targets": [ { "ip": node_ip, "port": 9000 if RAY_SERVE_ENABLE_HA_PROXY else 40000, "instance_id": node_instance_id, "name": proxy_details.actor_name if RAY_SERVE_ENABLE_HA_PROXY else replica.actor_name, }, ], "route_prefix": "/", "protocol": "gRPC", "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default", }, ], } ) assert details_json == expected_json # ensure internal field, serialized_policy_def, is not exposed application = details["applications"]["default"] deployment = application["deployments"]["autoscaling_app"] autoscaling_config = deployment["deployment_config"]["autoscaling_config"] assert "_serialized_policy_def" not in autoscaling_config
test
0
{"function_name": "test_get_serve_instance_details_json_serializable", "class_name": null, "qualname": "test_get_serve_instance_details_json_serializable", "file_path": "python/ray/serve/tests/test_direct_ingress.py", "repo_id": "ray-project/ray", "loc": 196, "tested_modules": ["concurrent.futures", "typing", "uuid", "fastapi", "starlette.requests"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/train/lint/check_circular_imports.py:expand_to_include_reexports
# Context: from typing import Dict, List, Optional, Set, Tuple def find_train_packages(base_train_dir: Path, patch_train_dir: Path) -> None: ... def is_train_package(module_str: str) -> bool: ... def get_base_dir() -> Path: ... def get_base_train_dir() -> Path: ... def does_overlap(main_module: str, module: str) -> bool: ... class Import: ... class ImportCollector(ast.NodeVisitor): ... def collect_imports(module_name: str, is_package: bool, source_text: str) -> Set[Import]: ... def to_module_name_and_is_package(py_file: Path) -> Tuple[str, bool]: ... def get_file_module_imports(files: List[Path], module_match_string: Optional[str]) -> Dict[str, List[Import]]: ... def convert_to_file_paths(imports: List[Import]) -> List[Path]: ... def check_violations(base_train_patching_imports: Dict[str, List[Import]], patch_dir: Path) -> List[str]: ... def main(): ... # Task: Write a Python function `expand_to_include_reexports` to expands the set of imports for a given import map to include the modules resulting from reexports. Parameters: import_map: Dict[str, List[Import]] Returns: None
def expand_to_include_reexports(import_map: Dict[str, List[Import]]) -> None: """ Expands the set of imports for a given import map to include the modules resulting from reexports. So if in the base train module, there is "from x import a, b" and x is a package, then this function will explore the __init__.py of x and include the modules a and b were reexported from in the import map. """ for module, base_imports in import_map.items(): # Get only the package imports packages = [imp for imp in base_imports if imp.is_package] package_files = convert_to_file_paths(packages) reexports = get_file_module_imports(package_files) agg_reexports = [] # Filter patch init file imports to those that only contain the right names for base_import in base_imports: if base_import.module in reexports: import_list = reexports[base_import.module] target_reexports = [ imp for imp in import_list if set(imp.names) & set(base_import.names) ] agg_reexports.extend(target_reexports) # Expand modules to include reexported modules import_map[module].extend(agg_reexports)
function_complex
0
{"cognitive_complexity": 6, "loc": 26, "code_loc": 15, "docstring_loc": 5, "function_name": "expand_to_include_reexports", "class_name": null, "qualname": "expand_to_include_reexports", "file_path": "python/ray/train/lint/check_circular_imports.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
Comfy-Org/ComfyUI:tests-unit/prompt_server_test/system_user_endpoint_test.py:TestSystemUserEndpointBlocking.test_userdata_post_blocks_system_user
# Context: import pytest from unittest.mock import patch def mock_user_directory(tmp_path): ... def user_manager_multi_user(mock_user_directory): ... def app_multi_user(user_manager_multi_user): ... class TestSystemUserCreationBlocking: ... class TestPublicUserStillWorks: ... class TestCustomNodeScenario: ... class TestStructuralSecurity: ... class TestSystemUserEndpointBlocking: async def test_userdata_get_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_userdata_delete_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_v2_userdata_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_move_userdata_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... # Task: Write a Python test method `test_userdata_post_blocks_system_user` in test class `TestSystemUserEndpointBlocking` to pOST /userdata with System User header should be blocked. Module under test: aiohttp, app.user_manager
async def test_userdata_post_blocks_system_user( self, aiohttp_client, app_multi_user, mock_user_directory ): """ POST /userdata with System User header should be blocked. """ client = await aiohttp_client(app_multi_user) with patch('app.user_manager.args') as mock_args: mock_args.multi_user = True resp = await client.post( "/userdata/test.txt", headers={"comfy-user": "__system"}, data=b"malicious content" ) assert resp.status in [400, 403, 500], \ f"System User write should be blocked, got {resp.status}" # Verify no file was created assert not (mock_user_directory / "__system" / "test.txt").exists()
test
1
{"function_name": "test_userdata_post_blocks_system_user", "class_name": "TestSystemUserEndpointBlocking", "qualname": "TestSystemUserEndpointBlocking.test_userdata_post_blocks_system_user", "file_path": "tests-unit/prompt_server_test/system_user_endpoint_test.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 21, "tested_modules": ["aiohttp", "app.user_manager"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py:BaseDataLoader.__init__
# Context: from typing import Dict, List, Optional from dataset_benchmark_util import IMAGENET_WNID_TO_ID class BenchmarkConfig: ... class S3ParquetDataLoader(BaseDataLoader): ... class S3UrlImageDataLoader(BaseDataLoader): ... class S3ReadImagesDataLoader(BaseDataLoader): ... def create_data_loader(data_loader: str, split: str) -> BaseDataLoader: ... def benchmark_iteration(dataset: ray.data.Dataset, batch_size: int, prefetch_batches: int, num_batches: int, simulated_training_time: float, device: str, pin_memory: bool) -> Dict[str, float]: ... def run_benchmark(config: BenchmarkConfig) -> List[Dict]: ... def print_summary(results: List[Dict]): ... def main(): ... class BaseDataLoader(ABC): TRANSFORM_CONFIGS = { def get_transform(cls, transform_type: str) -> transforms.Compose: ... def tensor_to_numpy(tensor) -> np.ndarray: ... def add_image_columns(result: Dict, processed_image: np.ndarray, num_columns: int): ... def make_split_dirs(s3_root: str) -> Dict[str, str]: ... def compute_limit(batch_size: int, num_batches: int) -> int: ... def create_dataset(self, transform_type: str, batch_size: int, num_batches: int, num_image_columns: int) -> ray.data.Dataset: ... # Task: Write a Python method `__init__` for the class `BaseDataLoader` to initialize the data loader. Parameters: data_dir: str, label_to_id_map: Dict[str, int]
def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None): """Initialize the data loader. Args: data_dir: Path to data directory label_to_id_map: Mapping from label strings to integer IDs """ self.data_dir = data_dir self.label_to_id_map = label_to_id_map or IMAGENET_WNID_TO_ID
function_simple
0
{"cognitive_complexity": 1, "loc": 9, "code_loc": 2, "docstring_loc": 6, "function_name": "__init__", "class_name": "BaseDataLoader", "qualname": "BaseDataLoader.__init__", "file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestBuildRichFieldDescription.test_min_max_length
# Context: from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestRequiredOptional: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestBuildRichFieldDescription: def test_description_only(self) -> None: ... def test_empty_schema(self) -> None: ... def test_format(self) -> None: ... def test_enum(self) -> None: ... def test_pattern(self) -> None: ... def test_min_max(self) -> None: ... def test_examples(self) -> None: ... def test_combined_constraints(self) -> None: ... # Task: Write a Python test method `test_min_max_length` in test class `TestBuildRichFieldDescription` to verify the behavior of `min_max_length`. Module under test: __future__, copy, typing
def test_min_max_length(self) -> None: desc = build_rich_field_description({"minLength": 1, "maxLength": 255}) assert "Min length: 1" in desc assert "Max length: 255" in desc
test
0
{"function_name": "test_min_max_length", "class_name": "TestBuildRichFieldDescription", "qualname": "TestBuildRichFieldDescription.test_min_max_length", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/dashboard/tests/test_dashboard_auth.py:test_dashboard_request_requires_auth_invalid_token
# Context: import requests def test_dashboard_request_requires_auth_with_valid_token(setup_cluster_with_token_auth): ... def test_dashboard_request_requires_auth_missing_token(setup_cluster_with_token_auth): ... def test_dashboard_request_with_ray_auth_header(setup_cluster_with_token_auth): ... def test_authorization_header_takes_precedence(setup_cluster_with_token_auth): ... def test_dashboard_auth_disabled(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_with_token_auth(setup_cluster_with_token_auth): ... def test_authentication_mode_endpoint_without_auth(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_is_public(setup_cluster_with_token_auth): ... # Task: Write a Python test function `test_dashboard_request_requires_auth_invalid_token` to test that requests fail with invalid token when auth is enabled.
def test_dashboard_request_requires_auth_invalid_token(setup_cluster_with_token_auth): """Test that requests fail with invalid token when auth is enabled.""" cluster_info = setup_cluster_with_token_auth headers = {"Authorization": "Bearer wrong_token_00000000000000000000000000000000"} response = requests.get( f"{cluster_info['dashboard_url']}/api/component_activities", json={"test": "data"}, headers=headers, ) assert response.status_code == 403
test
0
{"function_name": "test_dashboard_request_requires_auth_invalid_token", "class_name": null, "qualname": "test_dashboard_request_requires_auth_invalid_token", "file_path": "python/ray/dashboard/tests/test_dashboard_auth.py", "repo_id": "ray-project/ray", "loc": 13, "tested_modules": [], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py:_dmat_from_arrow
# Context: import numpy as np import xgboost as xgb import pyarrow as pa def _arrow_table_from_shard(name: str) -> pa.Table: ... def train_func(config): ... class XGBPredictor: ... # Task: Write a Python function `_dmat_from_arrow` to build XGBoost DMatrix from pyarrow.Table with explicit feature_names. Parameters: table: pa.Table, feature_cols, label_col: str
def _dmat_from_arrow(table: pa.Table, feature_cols, label_col: str): """Build XGBoost DMatrix from pyarrow.Table with explicit feature_names.""" X = np.column_stack([table[c].to_numpy(zero_copy_only=False) for c in feature_cols]) y = table[label_col].to_numpy(zero_copy_only=False) return xgb.DMatrix(X, label=y, feature_names=feature_cols)
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "_dmat_from_arrow", "class_name": null, "qualname": "_dmat_from_arrow", "file_path": "doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py:test_hunyuan_a13b_tool_parser_streaming
# Context: from unittest.mock import MagicMock import pytest from tests.entrypoints.openai.tool_parsers.utils import ( run_tool_extraction, run_tool_extraction_streaming, ) from vllm.tool_parsers import ToolParser, ToolParserManager def make_tool_call(name, arguments): ... def test_hunyuan_a13b_tool_parser_extract(model_output, expected_tool_calls, expected_content): ... # Task: Write a Python test function `test_hunyuan_a13b_tool_parser_streaming` to verify the behavior of `hunyuan_a13b_tool_parser_streaming`. Module under test: tests.entrypoints.openai.tool_parsers.utils, vllm.entrypoints.openai.engine.protocol, vllm.tool_parsers
def test_hunyuan_a13b_tool_parser_streaming(model_deltas, expected_tool_calls): mock_tokenizer = MagicMock() tool_parser: ToolParser = ToolParserManager.get_tool_parser("hunyuan_a13b")( mock_tokenizer ) reconstructor = run_tool_extraction_streaming( tool_parser, model_deltas, assert_one_tool_per_delta=False ) # align the random id. for idx in range(len(reconstructor.tool_calls)): reconstructor.tool_calls[idx].id = expected_tool_calls[idx].id assert reconstructor.tool_calls == expected_tool_calls
test
1
{"function_name": "test_hunyuan_a13b_tool_parser_streaming", "class_name": null, "qualname": "test_hunyuan_a13b_tool_parser_streaming", "file_path": "tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py", "repo_id": "vllm-project/vllm", "loc": 15, "tested_modules": ["tests.entrypoints.openai.tool_parsers.utils", "vllm.entrypoints.openai.engine.protocol", "vllm.tool_parsers"], "has_docstring": false, "runnable_level": "project_runnable"}
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/scheduler_tool.py:module_doc
Write a module-level docstring for the Python module `scheduler_tool` which contains class `SchedulerTool`.
Scheduler tool for creating and managing scheduled tasks
documentation
1
{"doc_type": "module", "module_name": "scheduler_tool", "file_path": "agent/tools/scheduler/scheduler_tool.py", "repo_id": "zhayujie/chatgpt-on-wechat", "char_length": 56}
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py:TestChatAssistantsDelete.test_repeated_deletion
# Context: import pytest class TestChatAssistantsDelete: def test_basic_scenarios(self, client, add_chat_assistants_func, payload, expected_message, remaining): ... def test_delete_chats_nonzero_response_raises(self, client, monkeypatch): ... def test_delete_partial_invalid_id(self, client, add_chat_assistants_func, payload): ... def test_duplicate_deletion(self, client, add_chat_assistants_func): ... def test_concurrent_deletion(self, client): ... def test_delete_1k(self, client): ... # Task: Write a Python test method `test_repeated_deletion` in test class `TestChatAssistantsDelete` to verify the behavior of `repeated_deletion`. Module under test: concurrent.futures, common
def test_repeated_deletion(self, client, add_chat_assistants_func): _, _, chat_assistants = add_chat_assistants_func chat_ids = [chat.id for chat in chat_assistants] client.delete_chats(ids=chat_ids) with pytest.raises(Exception) as exception_info: client.delete_chats(ids=chat_ids) assert "not found" in str(exception_info.value)
test
1
{"function_name": "test_repeated_deletion", "class_name": "TestChatAssistantsDelete", "qualname": "TestChatAssistantsDelete.test_repeated_deletion", "file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py", "repo_id": "infiniflow/ragflow", "loc": 8, "tested_modules": ["concurrent.futures", "common"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py:TestKeycloakProvider.test_get_token_url_with_different_domain
# Context: from crewai.cli.authentication.main import Oauth2Settings from crewai.cli.authentication.providers.keycloak import KeycloakProvider class TestKeycloakProvider: def setup_method(self): ... def test_initialization_with_valid_settings(self): ... def test_get_authorize_url(self): ... def test_get_authorize_url_with_different_domain(self): ... def test_get_token_url(self): ... def test_get_jwks_url(self): ... def test_get_jwks_url_with_different_domain(self): ... def test_get_issuer(self): ... def test_get_issuer_with_different_domain(self): ... def test_get_audience(self): ... def test_get_client_id(self): ... def test_get_required_fields(self): ... def test_oauth2_base_url(self): ... def test_oauth2_base_url_strips_https_prefix(self): ... def test_oauth2_base_url_strips_http_prefix(self): ... # Task: Write a Python test method `test_get_token_url_with_different_domain` in test class `TestKeycloakProvider` to verify the behavior of `get_token_url_with_different_domain`. Module under test: crewai.cli.authentication.main, crewai.cli.authentication.providers.keycloak
def test_get_token_url_with_different_domain(self): settings = Oauth2Settings( provider="keycloak", domain="sso.enterprise.com", client_id="test-client", audience="test-audience", extra={ "realm": "enterprise-realm" } ) provider = KeycloakProvider(settings) expected_url = "https://sso.enterprise.com/realms/enterprise-realm/protocol/openid-connect/token" assert provider.get_token_url() == expected_url
test
0
{"function_name": "test_get_token_url_with_different_domain", "class_name": "TestKeycloakProvider", "qualname": "TestKeycloakProvider.test_get_token_url_with_different_domain", "file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py", "repo_id": "crewAIInc/crewAI", "loc": 13, "tested_modules": ["crewai.cli.authentication.main", "crewai.cli.authentication.providers.keycloak"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestRequiredOptional.test_required_field_has_no_default
# Context: import pytest from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestBuildRichFieldDescription: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestRequiredOptional: def test_optional_field_defaults_to_none(self) -> None: ... def test_mixed_required_optional(self) -> None: ... # Task: Write a Python test method `test_required_field_has_no_default` in test class `TestRequiredOptional` to verify the behavior of `required_field_has_no_default`. Module under test: __future__, copy, typing
def test_required_field_has_no_default(self) -> None: schema = { "type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"], } Model = create_model_from_schema(schema) with pytest.raises(Exception): Model()
test
0
{"function_name": "test_required_field_has_no_default", "class_name": "TestRequiredOptional", "qualname": "TestRequiredOptional.test_required_field_has_no_default", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py:TestHumanFeedbackLearn.test_learn_true_empty_feedback_does_not_store
# Context: from unittest.mock import MagicMock, patch from crewai.flow import Flow, human_feedback, listen, start class TestHumanFeedbackValidation: ... class TestHumanFeedbackConfig: ... class TestHumanFeedbackResult: ... class TestDecoratorAttributePreservation: ... class TestAsyncSupport: ... class TestHumanFeedbackExecution: ... class TestHumanFeedbackHistory: ... class TestCollapseToOutcome: ... class TestHumanFeedbackLearn: def test_learn_false_does_not_interact_with_memory(self): ... def test_learn_true_stores_distilled_lessons(self): ... def test_learn_true_pre_reviews_with_past_lessons(self): ... def test_learn_true_uses_default_llm(self): ... # Task: Write a Python test method `test_learn_true_empty_feedback_does_not_store` in test class `TestHumanFeedbackLearn` to when learn=True but feedback is empty, no lessons are stored. Module under test: __future__, datetime, typing
def test_learn_true_empty_feedback_does_not_store(self): """When learn=True but feedback is empty, no lessons are stored.""" class LearnFlow(Flow): @start() @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True) def produce(self): return "output" flow = LearnFlow() flow.memory = MagicMock() flow.memory.recall.return_value = [] with patch.object( flow, "_request_human_feedback", return_value="" ): flow.produce() # Empty feedback -> no distillation, no storage flow.memory.remember_many.assert_not_called()
test
0
{"function_name": "test_learn_true_empty_feedback_does_not_store", "class_name": "TestHumanFeedbackLearn", "qualname": "TestHumanFeedbackLearn.test_learn_true_empty_feedback_does_not_store", "file_path": "lib/crewai/tests/test_human_feedback_decorator.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.human_feedback"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:tests/ci/test_cli_headed_flag.py:test_headed_flag_with_session
# Context: from browser_use.skill_cli.main import build_parser def test_headed_flag_before_open_subcommand(): ... def test_headed_flag_default_is_false(): ... def test_headed_flag_with_browser_mode(): ... # Task: Write a Python test function `test_headed_flag_with_session` to test that --headed works with other global flags like -s/--session. Module under test: browser_use.skill_cli.main
def test_headed_flag_with_session(): """Test that --headed works with other global flags like -s/--session.""" parser = build_parser() args = parser.parse_args(['--headed', '-s', 'mysession', 'open', 'http://example.com']) assert args.headed is True assert args.session == 'mysession' assert args.url == 'http://example.com'
test
0
{"function_name": "test_headed_flag_with_session", "class_name": null, "qualname": "test_headed_flag_with_session", "file_path": "tests/ci/test_cli_headed_flag.py", "repo_id": "browser-use/browser-use", "loc": 8, "tested_modules": ["browser_use.skill_cli.main"], "has_docstring": true, "runnable_level": "project_runnable"}
xtekky/gpt4free:g4f/Provider/qwen/cookie_generator.py:refresh_cookies
# Context: from g4f import debug import asyncio def lzw_compress(data: Optional[str], bits: int, char_func: Callable[[int], str]) -> str: ... def custom_encode(data: Optional[str], url_safe: bool) -> str: ... def random_hash() -> int: ... def generate_device_id() -> str: ... def parse_real_data(real_data: str) -> List[str]: ... def process_fields(fields: List[str]) -> List[Union[str, int]]: ... def generate_cookies(real_data: Optional[str], fingerprint_options: Optional[Dict[str, Any]]) -> Dict[str, Any]: ... def generate_batch(count: int, real_data: Optional[str], fingerprint_options: Optional[Dict[str, Any]]) -> List[Dict[str, Any]]: ... async def _refresh_loop() -> None: ... def init_ssxmod_manager() -> None: ... async def stop_refresh() -> None: ... async def get_ssxmod_itna() -> str: ... async def get_ssxmod_itna2() -> str: ... async def get_cookies() -> Dict[str, Any]: ... # Task: Write a Python async function `refresh_cookies` to refresh SSXMOD cookies (async wrapper).
async def refresh_cookies(): """Refresh SSXMOD cookies (async wrapper).""" global _current_cookies try: # generate_cookies() is CPU-bound sync; run it off the event loop. result = await asyncio.to_thread(generate_cookies) async with _lock: _current_cookies = { "ssxmod_itna": result["ssxmod_itna"], "ssxmod_itna2": result["ssxmod_itna2"], "timestamp": result["timestamp"], } debug.log("SSXMOD Cookie 已刷新", "SSXMOD") except Exception as e: debug.error("SSXMOD Cookie 刷新失败", "SSXMOD", "", str(e)) return _current_cookies
function_simple
1
{"cognitive_complexity": 1, "loc": 18, "code_loc": 13, "docstring_loc": 1, "function_name": "refresh_cookies", "class_name": null, "qualname": "refresh_cookies", "file_path": "g4f/Provider/qwen/cookie_generator.py", "repo_id": "xtekky/gpt4free", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py:InvokeCrewAIAutomationTool:class_doc
Write a class-level docstring for `InvokeCrewAIAutomationTool` (inherits from BaseTool) which has methods: `__init__`, `_kickoff_crew`, `_get_crew_status`, `_run`.
A CrewAI tool for invoking external crew/flows APIs. This tool provides CrewAI Platform API integration with external crew services, supporting: - Dynamic input schema configuration - Automatic polling for task completion - Bearer token authentication - Comprehensive error handling Example: Basic usage: >>> tool = InvokeCrewAIAutomationTool( ... crew_api_url="https://api.example.com", ... crew_bearer_token="your_token", ... crew_name="My Crew", ... crew_description="Description of what the crew does", ... ) With custom inputs: >>> custom_inputs = { ... "param1": Field(..., description="Description of param1"), ... "param2": Field( ... default="default_value", description="Description of param2" ... ), ... } >>> tool = InvokeCrewAIAutomationTool( ... crew_api_url="https://api.example.com", ... crew_bearer_token="your_token", ... crew_name="My Crew", ... crew_description="Description of what the crew does", ... crew_inputs=custom_inputs, ... ) Example: >>> tools = [ ... InvokeCrewAIAutomationTool( ... crew_api_url="https://canary-crew-[...].crewai.com", ... crew_bearer_token="[Your token: abcdef012345]", ... crew_name="State of AI Report", ... crew_description="Retrieves a report on state of AI for a given year.", ... crew_inputs={ ... "year": Field( ... ..., description="Year to retrieve the report for (integer)" ... ) ... }, ... ) ... ]
documentation
0
{"doc_type": "class", "class_name": "InvokeCrewAIAutomationTool", "file_path": "lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py", "repo_id": "crewAIInc/crewAI", "char_length": 1650, "methods": ["__init__", "_kickoff_crew", "_get_crew_status", "_run"]}
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/config.py:A2AClientConfig:class_doc
Write a class-level docstring for `A2AClientConfig` (inherits from BaseModel) which has methods: `_migrate_deprecated_transport_fields`.
Configuration for connecting to remote A2A agents. Attributes: endpoint: A2A agent endpoint URL. auth: Authentication scheme. timeout: Request timeout in seconds. max_turns: Maximum conversation turns with A2A agent. response_model: Optional Pydantic model for structured A2A agent responses. fail_fast: If True, raise error when agent unreachable; if False, skip and continue. trust_remote_completion_status: If True, return A2A agent's result directly when completed. updates: Update mechanism config. accepted_output_modes: Media types the client can accept in responses. extensions: Extension URIs the client supports (A2A protocol extensions). client_extensions: Client-side processing hooks for tool injection and prompt augmentation. transport: Transport configuration (preferred, supported transports, gRPC settings).
documentation
0
{"doc_type": "class", "class_name": "A2AClientConfig", "file_path": "lib/crewai/src/crewai/a2a/config.py", "repo_id": "crewAIInc/crewAI", "char_length": 874, "methods": ["_migrate_deprecated_transport_fields"]}
ray-project/ray:python/ray/llm/tests/common/cloud/test_utils.py:TestRemoteObjectCacheDecorator.test_expiration
# Context: import asyncio import pytest from ray.llm._internal.common.utils.cloud_utils import ( CloudObjectCache, is_remote_path, remote_object_cache, ) class MockSyncFetcher: ... class MockAsyncFetcher: ... class TestCloudObjectCache: ... class TestIsRemotePath: ... class TestRemoteObjectCacheDecorator: async def test_basic_functionality(self): ... async def test_error_handling(self): ... async def test_concurrent_access(self): ... # Task: Write a Python test method `test_expiration` in test class `TestRemoteObjectCacheDecorator` to test cache expiration for both missing and existing objects. Module under test: ray.llm._internal.common.utils.cloud_utils
async def test_expiration(self): """Test cache expiration for both missing and existing objects.""" call_count = 0 MISSING = object() @remote_object_cache( max_size=2, missing_expire_seconds=1, # 1 second to expire missing object exists_expire_seconds=3, # 3 seconds to expire existing object missing_object_value=MISSING, ) async def fetch(key: str): nonlocal call_count call_count += 1 if key == "missing": return MISSING return f"value-{key}" # Test missing object expiration assert await fetch("missing") is MISSING assert call_count == 1 assert await fetch("missing") is MISSING # Should hit cache assert call_count == 1 await asyncio.sleep(1.5) # Wait for missing object to expire assert await fetch("missing") is MISSING # Should fetch again assert call_count == 2 # Test existing object expiration assert await fetch("key1") == "value-key1" assert call_count == 3 assert await fetch("key1") == "value-key1" # Should hit cache assert call_count == 3 await asyncio.sleep(1.5) # Not expired yet assert await fetch("key1") == "value-key1" # Should still hit cache assert call_count == 3 await asyncio.sleep(2) # Now expired (total > 3 seconds) assert await fetch("key1") == "value-key1" # Should fetch again assert call_count == 4
test
0
{"function_name": "test_expiration", "class_name": "TestRemoteObjectCacheDecorator", "qualname": "TestRemoteObjectCacheDecorator.test_expiration", "file_path": "python/ray/llm/tests/common/cloud/test_utils.py", "repo_id": "ray-project/ray", "loc": 41, "tested_modules": ["ray.llm._internal.common.utils.cloud_utils"], "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/diffusers:src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py:license_header
Add a Apache-2.0 license header comment for the project 'diffusers', authored by Qwen-Image Team, InstantX Team and The HuggingFace Team, year 2025.
# Copyright 2025 Qwen-Image Team, InstantX Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
license
1
{"license_type": "Apache-2.0", "author": "Qwen-Image Team, InstantX Team and The HuggingFace Team", "year": "2025", "source": "header", "repo_id": "huggingface/diffusers"}
browser-use/browser-use:browser_use/skills/service.py:module_doc
Write a module-level docstring for the Python module `service` which contains class `SkillService`.
Skills service for fetching and executing skills from the Browser Use API
documentation
0
{"doc_type": "module", "module_name": "service", "file_path": "browser_use/skills/service.py", "repo_id": "browser-use/browser-use", "char_length": 73}
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py:TestChunksRetrieval.test_keyword
# Context: import pytest from common import retrieval_chunks class TestAuthorization: ... class TestChunksRetrieval: def test_basic_scenarios(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_page(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_page_size(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_vector_similarity_weight(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_top_k(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_rerank_id(self, WebApiAuth, add_chunks, payload, expected_code, expected_message): ... def test_highlight(self, WebApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message): ... def test_invalid_params(self, WebApiAuth, add_chunks): ... def test_concurrent_retrieval(self, WebApiAuth, add_chunks): ... # Task: Write a Python test method `test_keyword` in test class `TestChunksRetrieval` to verify the behavior of `keyword`. Module under test: concurrent.futures, common, configs
def test_keyword(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): dataset_id, _, _ = add_chunks payload.update({"question": "chunk test", "kb_id": [dataset_id]}) res = retrieval_chunks(WebApiAuth, payload) assert res["code"] == expected_code, res if expected_code == 0: assert len(res["data"]["chunks"]) == expected_page_size, res else: assert res["message"] == expected_message, res
test
1
{"function_name": "test_keyword", "class_name": "TestChunksRetrieval", "qualname": "TestChunksRetrieval.test_keyword", "file_path": "test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py", "repo_id": "infiniflow/ragflow", "loc": 9, "tested_modules": ["concurrent.futures", "common", "configs", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-tools/tests/tools/tool_collection_test.py:TestToolCollection.test_access_by_index
# Context: class TestToolCollection(unittest.TestCase): def setUp(self): ... def _create_mock_tool(self, name, description): ... def test_initialization(self): ... def test_empty_initialization(self): ... def test_initialization_with_none(self): ... def test_access_by_name(self): ... def test_key_error_for_invalid_name(self): ... def test_index_error_for_invalid_index(self): ... def test_negative_index(self): ... def test_append(self): ... def test_append_duplicate_name(self): ... def test_extend(self): ... def test_insert(self): ... def test_remove(self): ... def test_remove_nonexistent_tool(self): ... def test_pop(self): ... def test_pop_last(self): ... def test_clear(self): ... def test_iteration(self): ... def test_contains(self): ... def test_slicing(self): ... def test_getitem_with_tool_name_as_int(self): ... def test_filter_by_names(self): ... def test_filter_where(self): ... # Task: Write a Python test method `test_access_by_index` in test class `TestToolCollection` to verify the behavior of `access_by_index`. Module under test: crewai.tools, crewai_tools.adapters.tool_collection
def test_access_by_index(self): self.assertEqual(self.tools[0], self.search_tool) self.assertEqual(self.tools[1], self.calculator_tool) self.assertEqual(self.tools[2], self.translator_tool)
test
0
{"function_name": "test_access_by_index", "class_name": "TestToolCollection", "qualname": "TestToolCollection.test_access_by_index", "file_path": "lib/crewai-tools/tests/tools/tool_collection_test.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["crewai.tools", "crewai_tools.adapters.tool_collection"], "has_docstring": false, "runnable_level": "class_runnable"}
browser-use/browser-use:tests/ci/test_structured_extraction.py:TestSchemaDictToPydanticModel.test_rejects_ref
# Context: import pytest from browser_use.tools.extraction.schema_utils import schema_dict_to_pydantic_model class TestExtractionResult: ... def _make_extraction_llm(structured_response: dict | None, freetext_response: str) -> BaseChatModel: ... async def browser_session(): ... def http_server(): ... def base_url(http_server): ... class TestExtractStructured: ... class TestExtractionSchemaInjection: ... class TestSchemaDictToPydanticModel: def test_flat_object(self): ... def test_nested_object(self): ... def test_array_of_objects(self): ... def test_array_of_primitives(self): ... def test_enum_field(self): ... def test_optional_enum_defaults_to_none(self): ... def test_optional_fields_get_type_appropriate_defaults(self): ... def test_optional_non_nullable_rejects_null(self): ... def test_optional_with_explicit_default(self): ... def test_optional_nested_object_defaults_to_none(self): ... def test_model_name_from_title(self): ... def test_model_validate_json_roundtrip(self): ... def test_rejects_allOf(self): ... def test_rejects_non_object_toplevel(self): ... def test_rejects_empty_properties(self): ... def test_extra_fields_forbidden(self): ... def test_nullable_field(self): ... def test_field_descriptions_preserved(self): ... # Task: Write a Python test method `test_rejects_ref` in test class `TestSchemaDictToPydanticModel` to verify the behavior of `rejects_ref`. Module under test: pydantic, browser_use.agent.views, browser_use.browser
def test_rejects_ref(self): schema = { 'type': 'object', 'properties': {'item': {'$ref': '#/$defs/Item'}}, '$defs': {'Item': {'type': 'object', 'properties': {'name': {'type': 'string'}}}}, } with pytest.raises(ValueError, match='Unsupported JSON Schema keyword'): schema_dict_to_pydantic_model(schema)
test
0
{"function_name": "test_rejects_ref", "class_name": "TestSchemaDictToPydanticModel", "qualname": "TestSchemaDictToPydanticModel.test_rejects_ref", "file_path": "tests/ci/test_structured_extraction.py", "repo_id": "browser-use/browser-use", "loc": 8, "tested_modules": ["pydantic", "browser_use.agent.views", "browser_use.browser", "browser_use.filesystem.file_system", "browser_use.llm.base"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:ci/ray_ci/ray_image.py:RayImage.wanda_image_name
# Context: class RayImageError(Exception): ... class RayImage: def __post_init__(self): ... def arch_suffix(self) -> str: ... def repo(self) -> str: ... def variation_suffix(self) -> str: ... def validate(self) -> None: ... # Task: Write a Python method `wanda_image_name` for the class `RayImage` to wanda output image name (without registry prefix). Returns: str
def wanda_image_name(self) -> str: """Wanda output image name (without registry prefix).""" if self.platform == "cpu": return f"{self.image_type}-py{self.python_version}-cpu{self.arch_suffix}" return f"{self.image_type}-py{self.python_version}-{self.platform}{self.arch_suffix}"
function_simple
0
{"cognitive_complexity": 1, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "wanda_image_name", "class_name": "RayImage", "qualname": "RayImage.wanda_image_name", "file_path": "ci/ray_ci/ray_image.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"}
infiniflow/ragflow:common/data_source/confluence_connector.py:ConfluenceConnector._fetch_page_attachments
# Context: import logging from pathlib import Path from typing import Any, cast, Iterator, Callable, Generator from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource, CONTINUE_ON_CONNECTOR_FAILURE, \ CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, CONFLUENCE_TIMEZONE_OFFSET, CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE, \ CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, \ OAUTH_CONFLUENCE_CLOUD_CLIENT_ID, OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET, _DEFAULT_PAGINATION_LIMIT, \ _PROBLEMATIC_EXPANSIONS, _REPLACEMENT_EXPANSIONS, _USER_NOT_FOUND, _COMMENT_EXPANSION_FIELDS, \ _ATTACHMENT_EXPANSION_FIELDS, _PAGE_EXPANSION_FIELDS, ONE_DAY, ONE_HOUR, _RESTRICTIONS_EXPANSION_FIELDS, \ _SLIM_DOC_BATCH_SIZE, CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD from common.data_source.interfaces import ( ConnectorCheckpoint, CredentialsConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync, StaticCredentialsProvider, CheckpointedConnector, SlimConnector, CredentialsProviderInterface, ConfluenceUser, IndexingHeartbeatInterface, AttachmentProcessingResult, CheckpointOutput ) from common.data_source.models import ConnectorFailure, Document, TextSection, ImageSection, BasicExpertInfo, \ DocumentFailure, GenerateSlimDocumentOutput, SlimDocument, ExternalAccess from common.data_source.utils import load_all_docs_from_checkpoint_connector, scoped_url, \ process_confluence_user_profiles_override, confluence_refresh_tokens, run_with_timeout, _handle_http_error, \ update_param_in_path, get_start_param_from_url, build_confluence_document_id, datetime_from_string, \ is_atlassian_date_error, validate_attachment_filetype class ConfluenceCheckpoint(ConnectorCheckpoint): ... class ConfluenceRateLimitError(Exception): ... class OnyxConfluence: ... def get_user_email_from_username__server(confluence_client: OnyxConfluence, user_name: str) -> str | None: ... def _get_user(confluence_client: OnyxConfluence, user_id: str) -> str: ... def sanitize_attachment_title(title: str) -> str: ... 
def extract_text_from_confluence_html(confluence_client: OnyxConfluence, confluence_object: dict[str, Any], fetched_titles: set[str]) -> str: ... def _remove_macro_stylings(soup: bs4.BeautifulSoup) -> None: ... def get_page_restrictions(confluence_client: OnyxConfluence, page_id: str, page_restrictions: dict[str, Any], ancestors: list[dict[str, Any]]) -> ExternalAccess | None: ... def get_all_space_permissions(confluence_client: OnyxConfluence, is_cloud: bool) -> dict[str, ExternalAccess]: ... def _make_attachment_link(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], parent_content_id: str | None) -> str | None: ... def _process_image_attachment(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], raw_bytes: bytes, media_type: str) -> AttachmentProcessingResult: ... def process_attachment(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], parent_content_id: str | None, allow_images: bool) -> AttachmentProcessingResult: ... def convert_attachment_to_content(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], page_id: str, allow_images: bool) -> tuple[str | None, bytes | bytearray | None] | None: ... class ConfluenceConnector(CheckpointedConnector[ConfluenceCheckpoint], SlimConnector, SlimConnectorWithPermSync, CredentialsConnector): def __init__( self, wiki_base: str, is_cloud: bool, space: str = "", page_id: str = "", index_recursively: bool = False, cql_query: str | None = None, batch_size: int = INDEX_BATCH_SIZE, continue_on_failure: bool = CONTINUE_ON_CONNECTOR_FAILURE, # if a page has one of the labels specified in this list, we will just # skip it. This is generally used to avoid indexing extra sensitive # pages. 
labels_to_skip: list[str] = CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, timezone_offset: float = CONFLUENCE_TIMEZONE_OFFSET, time_buffer_seconds: int = CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, scoped_token: bool = False, ) -> None: self.wiki_base = wiki_base self.is_cloud = is_cloud self.space = space self.page_id = page_id self.index_recursively = index_recursively self.cql_query = cql_query self.batch_size = batch_size self.labels_to_skip = labels_to_skip self.timezone_offset = timezone_offset self.time_buffer_seconds = max(0, time_buffer_seconds) self.scoped_token = scoped_token self._confluence_client: OnyxConfluence | None = None self._low_timeout_confluence_client: OnyxConfluence | None = None self._fetched_titles: set[str] = set() self.allow_images = False # Track document names to detect duplicates self._document_name_counts: dict[str, int] = {} self._document_name_paths: dict[str, list[str]] = {} # Remove trailing slash from wiki_base if present self.wiki_base = wiki_base.rstrip("/") """ If nothing is provided, we default to fetching all pages Only one or none of the following options should be specified so the order shouldn't matter However, we use elif to ensure that only of the following is enforced """ base_cql_page_query = "type=page" if cql_query: base_cql_page_query = cql_query elif page_id: if index_recursively: base_cql_page_query += f" and (ancestor='{page_id}' or id='{page_id}')" else: base_cql_page_query += f" and id='{page_id}'" elif space: uri_safe_space = quote(space) base_cql_page_query += f" and space='{uri_safe_space}'" self.base_cql_page_query = base_cql_page_query self.cql_label_filter = "" if labels_to_skip: labels_to_skip = list(set(labels_to_skip)) comma_separated_labels = ",".join( f"'{quote(label)}'" for label in labels_to_skip ) self.cql_label_filter = f" and label not in ({comma_separated_labels})" self.timezone: timezone = timezone(offset=timedelta(hours=timezone_offset)) self.credentials_provider: CredentialsProviderInterface | None = 
None self.probe_kwargs = { "max_backoff_retries": 6, "max_backoff_seconds": 10, } self.final_kwargs = { "max_backoff_retries": 10, "max_backoff_seconds": 60, } # deprecated self.continue_on_failure = continue_on_failure def set_allow_images(self, value: bool) -> None: ... def _adjust_start_for_query(self, start: SecondsSinceUnixEpoch | None) -> SecondsSinceUnixEpoch | None: ... def _is_newer_than_start(self, doc_time: datetime | None, start: SecondsSinceUnixEpoch | None) -> bool: ... def confluence_client(self) -> OnyxConfluence: ... def low_timeout_confluence_client(self) -> OnyxConfluence: ... def set_credentials_provider(self, credentials_provider: CredentialsProviderInterface) -> None: ... def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None: ... def _construct_page_cql_query(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> str: ... def _construct_attachment_query(self, confluence_page_id: str, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> str: ... def _get_comment_string_for_page_id(self, page_id: str) -> str: ... def _convert_page_to_document(self, page: dict[str, Any]) -> Document | ConnectorFailure: ... def _fetch_document_batches(self, checkpoint: ConfluenceCheckpoint, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> CheckpointOutput[ConfluenceCheckpoint]: ... def _build_page_retrieval_url(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, limit: int) -> str: ... def load_from_checkpoint(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch, checkpoint: ConfluenceCheckpoint) -> CheckpointOutput[ConfluenceCheckpoint]: ... def build_dummy_checkpoint(self) -> ConfluenceCheckpoint: ... def validate_checkpoint_json(self, checkpoint_json: str) -> ConfluenceCheckpoint: ... 
def retrieve_all_slim_docs(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None) -> GenerateSlimDocumentOutput: ... def retrieve_all_slim_docs_perm_sync(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None) -> GenerateSlimDocumentOutput: ... def _retrieve_all_slim_docs(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None, include_permissions: bool) -> GenerateSlimDocumentOutput: ... def validate_connector_settings(self) -> None: ... # Task: Write a Python method `_fetch_page_attachments` for the class `ConfluenceConnector` to inline attachments are added directly to the document as text or image sections by. Parameters: page: dict[str, Any], start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None Returns: tuple[list[Document], list[ConnectorFailure]]
def _fetch_page_attachments(
    self,
    page: dict[str, Any],
    start: SecondsSinceUnixEpoch | None = None,
    end: SecondsSinceUnixEpoch | None = None,
) -> tuple[list[Document], list[ConnectorFailure]]:
    """
    Inline attachments are added directly to the document as text or image sections by
    this function. The returned documents/connectorfailures are for non-inline
    attachments and those at the end of the page.

    Args:
        page: Raw Confluence page payload; "id", "title" and "_links" are read
            from it (and "space" when present).
        start: Lower bound passed to the attachment CQL query; also used to drop
            attachments last updated before the window via _is_newer_than_start.
        end: Upper bound passed to the attachment CQL query.

    Returns:
        Tuple of (attachment documents, connector failures for attachments
        whose conversion raised a non-retryable error).
    """
    attachment_query = self._construct_attachment_query(page["id"], start, end)
    attachment_failures: list[ConnectorFailure] = []
    attachment_docs: list[Document] = []
    # Parent page URL, computed lazily on first use and shared by all
    # attachments of this page.
    page_url = ""
    for attachment in self.confluence_client.paginated_cql_retrieval(
        cql=attachment_query,
        expand=",".join(_ATTACHMENT_EXPANSION_FIELDS),
    ):
        media_type: str = attachment.get("metadata", {}).get("mediaType", "")
        # TODO(rkuo): this check is partially redundant with validate_attachment_filetype
        # and checks in convert_attachment_to_content/process_attachment
        # but doing the check here avoids an unnecessary download. Due for refactoring.
        if not self.allow_images:
            if media_type.startswith("image/"):
                logging.info(
                    f"Skipping attachment because allow images is False: {attachment['title']}"
                )
                continue
        if not validate_attachment_filetype(
            attachment,
        ):
            logging.info(
                f"Skipping attachment because it is not an accepted file type: {attachment['title']}"
            )
            continue
        logging.info(
            f"Processing attachment: {attachment['title']} attached to page {page['title']}"
        )
        # Attachment document id: use the download URL for stable identity
        try:
            object_url = build_confluence_document_id(
                self.wiki_base, attachment["_links"]["download"], self.is_cloud
            )
        except Exception as e:
            # Malformed "_links" data: skip this attachment entirely.
            logging.warning(
                f"Invalid attachment url for id {attachment['id']}, skipping"
            )
            logging.debug(f"Error building attachment url: {e}")
            continue
        try:
            response = convert_attachment_to_content(
                confluence_client=self.confluence_client,
                attachment=attachment,
                page_id=page["id"],
                allow_images=self.allow_images,
            )
            if response is None:
                # Converter declined the attachment (no content produced).
                continue
            # NOTE(review): file_storage_name is unpacked but never used below.
            file_storage_name, file_blob = response
            if not file_blob:
                logging.info("Skipping attachment because it is no blob fetched")
                continue
            # Build attachment-specific metadata
            attachment_metadata: dict[str, str | list[str]] = {}
            if "space" in attachment:
                attachment_metadata["space"] = attachment["space"].get("name", "")
            labels: list[str] = []
            if "metadata" in attachment and "labels" in attachment["metadata"]:
                for label in attachment["metadata"]["labels"].get("results", []):
                    labels.append(label.get("name", ""))
            if labels:
                attachment_metadata["labels"] = labels
            # Lazy init: only build the page URL once per page.
            page_url = page_url or build_confluence_document_id(
                self.wiki_base, page["_links"]["webui"], self.is_cloud
            )
            attachment_metadata["parent_page_id"] = page_url
            attachment_id = build_confluence_document_id(
                self.wiki_base, attachment["_links"]["webui"], self.is_cloud
            )
            # Build semantic identifier with space and page context
            attachment_title = attachment.get("title", object_url)
            space_name = page.get("space", {}).get("name", "")
            page_title = page.get("title", "")
            # Create hierarchical name: Space / Page / Attachment
            attachment_path_parts = []
            if space_name:
                attachment_path_parts.append(space_name)
            if page_title:
                attachment_path_parts.append(page_title)
            attachment_path_parts.append(attachment_title)
            full_attachment_path = " / ".join(attachment_path_parts) if len(attachment_path_parts) > 1 else attachment_title
            # Track attachment names for duplicate detection
            if attachment_title not in self._document_name_counts:
                self._document_name_counts[attachment_title] = 0
                self._document_name_paths[attachment_title] = []
            self._document_name_counts[attachment_title] += 1
            self._document_name_paths[attachment_title].append(full_attachment_path)
            # Use simple name if no duplicates, otherwise use full path
            # NOTE(review): only occurrences seen *after* the first get the long
            # form; the first occurrence of a duplicated title keeps the short one.
            if self._document_name_counts[attachment_title] == 1:
                attachment_semantic_identifier = attachment_title
            else:
                attachment_semantic_identifier = full_attachment_path
            primary_owners: list[BasicExpertInfo] | None = None
            if "version" in attachment and "by" in attachment["version"]:
                author = attachment["version"]["by"]
                display_name = author.get("displayName", "Unknown")
                email = author.get("email", "unknown@domain.invalid")
                primary_owners = [
                    BasicExpertInfo(display_name=display_name, email=email)
                ]
            # Fall back to ".unknown" when the title carries no file extension.
            extension = Path(attachment.get("title", "")).suffix or ".unknown"
            attachment_doc = Document(
                id=attachment_id,
                # sections=sections,
                source=DocumentSource.CONFLUENCE,
                semantic_identifier=attachment_semantic_identifier,
                extension=extension,
                blob=file_blob,
                size_bytes=len(file_blob),
                metadata=attachment_metadata,
                doc_updated_at=(
                    datetime_from_string(attachment["version"]["when"])
                    if attachment.get("version")
                    and attachment["version"].get("when")
                    else None
                ),
                primary_owners=primary_owners,
            )
            # Drop attachments whose last update predates the requested window.
            if self._is_newer_than_start(attachment_doc.doc_updated_at, start):
                attachment_docs.append(attachment_doc)
        except Exception as e:
            logging.error(
                f"Failed to extract/summarize attachment {attachment['title']}",
                exc_info=e,
            )
            if is_atlassian_date_error(e):
                # propagate error to be caught and retried
                raise
            attachment_failures.append(
                ConnectorFailure(
                    failed_document=DocumentFailure(
                        document_id=object_url,
                        document_link=object_url,
                    ),
                    failure_message=f"Failed to extract/summarize attachment {attachment['title']} for doc {object_url}",
                    exception=e,
                )
            )
    return attachment_docs, attachment_failures
function_complex
1
{"cognitive_complexity": 58, "loc": 167, "code_loc": 128, "docstring_loc": 5, "function_name": "_fetch_page_attachments", "class_name": "ConfluenceConnector", "qualname": "ConfluenceConnector._fetch_page_attachments", "file_path": "common/data_source/confluence_connector.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/_internal/batch/benchmark/benchmark_processor.py:_build_serve_deployment_config
# Context: from ray.data.llm import ( ChatTemplateStageConfig, DetokenizeStageConfig, ServeDeploymentProcessorConfig, TokenizerStageConfig, build_processor, vLLMEngineProcessorConfig, ) from ray.serve.llm.openai_api_models import CompletionRequest class Mode(Enum): ... def build_vllm_engine_kwargs(**kwargs) -> dict: ... def _build_vllm_engine_config(model: str, batch_size: int, concurrency: int, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str, task_type: str, max_model_len: int) -> vLLMEngineProcessorConfig: ... class BenchmarkResult: ... def build_single_vllm_engine_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str): ... def build_shared_vllm_engine_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str): ... def build_classify_processor(batch_size: int, concurrency: int, model: str, pooling_params: dict, max_model_len: int, distributed_executor_backend: str): ... def setup_serve_deployment(model: str, concurrency: int) -> tuple[str, str]: ... def _is_app_ready(app_name: str) -> bool: ... def build_single_serve_deployment_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, deployment_name: str, app_name: str, **kwargs): ... def build_shared_serve_deployment_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, deployment_name: str, app_name: str, **kwargs): ... def run_processor(mode: Mode, dataset: data.Dataset, builder, **kwargs) -> BenchmarkResult: ... def benchmark(mode: Mode, dataset: data.Dataset, batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str) -> BenchmarkResult: ... 
def parse_args(argv: list[str]) -> argparse.Namespace: ... def main() -> None: ... # Task: Write a Python function `_build_serve_deployment_config` to helper to create ServeDeploymentProcessorConfig. Parameters: batch_size: int, concurrency: int, deployment_name: str, app_name: str Returns: ServeDeploymentProcessorConfig
def _build_serve_deployment_config(
    batch_size: int,
    concurrency: int,
    deployment_name: str | None = None,
    app_name: str | None = None,
) -> ServeDeploymentProcessorConfig:
    """Helper to create ServeDeploymentProcessorConfig.

    Fix: the parameters were annotated plain ``str`` while defaulting to
    ``None`` (implicit Optional, disallowed by PEP 484); annotations now
    say ``str | None`` to match the actual defaults. Behavior unchanged.

    Args:
        batch_size: Number of rows per batch sent to the deployment.
        concurrency: Number of concurrent request workers.
        deployment_name: Name of the target Serve deployment.
        app_name: Name of the Serve application hosting the deployment.

    Returns:
        A ServeDeploymentProcessorConfig that maps the "CompletionRequest"
        dtype tag to the CompletionRequest model.
    """
    return ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            # Rows tagged "CompletionRequest" are deserialized into this type.
            "CompletionRequest": CompletionRequest,
        },
        batch_size=batch_size,
        concurrency=concurrency,
    )
function_simple
0
{"cognitive_complexity": 0, "loc": 16, "code_loc": 9, "docstring_loc": 1, "function_name": "_build_serve_deployment_config", "class_name": null, "qualname": "_build_serve_deployment_config", "file_path": "python/ray/llm/_internal/batch/benchmark/benchmark_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_type_converter_component.py:TestTypeConverterComponent.test_dataframe_to_data
# Context: import pandas as pd from lfx.schema.data import Data from lfx.schema.dataframe import DataFrame class TestTypeConverterComponent(ComponentTestBaseWithoutClient): def component_class(self): ... def file_names_mapping(self): ... def test_message_to_message(self, component_class): ... def test_message_to_data(self, component_class): ... def test_message_to_dataframe(self, component_class): ... def test_data_to_message(self, component_class): ... def test_data_to_data(self, component_class): ... def test_data_to_dataframe(self, component_class): ... def test_dataframe_to_message(self, component_class): ... def test_dataframe_to_dataframe(self, component_class): ... def test_update_outputs(self, component_class): ... def test_message_with_valid_json_text_to_data(self, component_class): ... def test_message_with_invalid_json_text_to_data(self, component_class): ... def test_message_with_valid_json_array_to_data(self, component_class): ... def test_message_with_valid_csv_to_data(self, component_class): ... def test_message_with_valid_csv_to_dataframe(self, component_class): ... def test_message_with_valid_json_object_to_dataframe(self, component_class): ... def test_message_with_valid_json_array_to_dataframe(self, component_class): ... def test_message_with_compact_json_array_to_dataframe(self, component_class): ... # Task: Write a Python test method `test_dataframe_to_data` in test class `TestTypeConverterComponent` to test converting DataFrame to Data. Module under test: io, lfx.components.processing.converter, lfx.schema.data
def test_dataframe_to_data(self, component_class):
    """A DataFrame input with output_type="Data" converts to a Data object."""
    source_frame = pd.DataFrame({"col1": ["Hello"]})
    converter = component_class(
        input_data=DataFrame(data=source_frame),
        output_type="Data",
    )
    converted = converter.convert_to_data()
    # The result must be a Data instance backed by a plain dict payload.
    assert isinstance(converted, Data)
    assert isinstance(converted.data, dict)
test
1
{"function_name": "test_dataframe_to_data", "class_name": "TestTypeConverterComponent", "qualname": "TestTypeConverterComponent.test_dataframe_to_data", "file_path": "src/backend/tests/unit/components/processing/test_type_converter_component.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["io", "lfx.components.processing.converter", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/ssh/tests/unit/ssh/hooks/test_ssh_async.py:TestSSHHookAsync.test_parse_extras_host_key
# Context: from unittest import mock from airflow.providers.ssh.hooks.ssh import SSHHookAsync class TestSSHHookAsync: def test_init_with_conn_id(self): ... def test_init_with_overrides(self): ... def test_init_default_known_hosts(self): ... def test_parse_extras_key_file(self): ... def test_parse_extras_no_host_key_check(self): ... def test_parse_extras_host_key_with_no_check_raises(self): ... def test_parse_extras_private_key(self): ... async def test_get_conn_builds_config(self): ... async def test_run_command(self): ... async def test_run_command_output(self): ... # Task: Write a Python test method `test_parse_extras_host_key` in test class `TestSSHHookAsync` to test parsing host_key from connection extras. Module under test: __future__, airflow.providers.ssh.hooks.ssh
def test_parse_extras_host_key(self):
    """A host_key in the connection extras becomes a known_hosts entry."""
    hook = SSHHookAsync(ssh_conn_id="test_conn")
    conn = mock.MagicMock()
    conn.host = "test.host"
    conn.extra_dejson = {
        "host_key": "ssh-rsa AAAAB3...",
        "no_host_key_check": "false",
    }
    hook._parse_extras(conn)
    # known_hosts is rendered as b"<host> <key>".
    assert hook.known_hosts == b"test.host ssh-rsa AAAAB3..."
test
1
{"function_name": "test_parse_extras_host_key", "class_name": "TestSSHHookAsync", "qualname": "TestSSHHookAsync.test_parse_extras_host_key", "file_path": "providers/ssh/tests/unit/ssh/hooks/test_ssh_async.py", "repo_id": "apache/airflow", "loc": 9, "tested_modules": ["__future__", "airflow.providers.ssh.hooks.ssh"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/browser/session.py:BrowserSession.get_target_id_from_tab_id
# Context: from cdp_use.cdp.target import SessionID, TargetID class Target(BaseModel): ... class CDPSession(BaseModel): ... class BrowserSession(BaseModel): model_config = ConfigDict( def __init__( self, *, # Cloud browser params - use these for cloud mode cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, # Common params that work with cloud id: str | None = None, headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, captcha_solver: bool | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, ) -> None: ... 
def __init__( self, *, # Core configuration for local id: str | None = None, cdp_url: str | None = None, browser_profile: BrowserProfile | None = None, # Local browser launch params executable_path: str | Path | None = None, headless: bool | None = None, user_data_dir: str | Path | None = None, args: list[str] | None = None, downloads_path: str | Path | None = None, # Common params headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, # All other local params env: dict[str, str | float | bool] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, traces_dir: str | Path | None = None, accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, storage_state: str | Path | dict[str, Any] | None = None, disable_security: bool | None = None, deterministic_rendering: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = 
None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = None, filter_highlight_ids: bool | None = None, profile_directory: str | None = None, ) -> None: ... def __init__( self, # Core configuration id: str | None = None, cdp_url: str | None = None, is_local: bool = False, browser_profile: BrowserProfile | None = None, # Cloud browser params (don't mix with local browser params) cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases for cloud params profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, # BrowserProfile fields that can be passed directly # From BrowserConnectArgs headers: dict[str, str] | None = None, # From BrowserLaunchArgs env: dict[str, str | float | bool] | None = None, executable_path: str | Path | None = None, headless: bool | None = None, args: list[str] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, downloads_path: str | Path | None = None, traces_dir: str | Path | None = None, # From BrowserContextArgs accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, # From BrowserLaunchPersistentContextArgs user_data_dir: str | Path | None = None, # From BrowserNewContextArgs storage_state: str | Path | dict[str, Any] | None = None, # BrowserProfile specific fields ## Cloud Browser 
Fields use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, ## Other params disable_security: bool | None = None, deterministic_rendering: bool | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, filter_highlight_ids: bool | None = None, auto_download_pdfs: bool | None = None, profile_directory: str | None = None, cookie_whitelist_domains: list[str] | None = None, # DOM extraction layer configuration cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, # Iframe processing limits max_iframes: int | None = None, max_iframe_depth: int | None = None, ): # Following the same pattern as AgentSettings in service.py # Only pass non-None values to avoid validation errors profile_kwargs = { k: v for k, v in locals().items() if k not in [ 'self', 'browser_profile', 'id', 'cloud_profile_id', 'cloud_proxy_country_code', 'cloud_timeout', 'profile_id', 'proxy_country_code', 'timeout', ] and v is not None } # Handle backward compatibility: prefer cloud_* params over old names final_profile_id = cloud_profile_id if cloud_profile_id is not None else profile_id final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code final_timeout = cloud_timeout if cloud_timeout is not None else timeout # If any cloud params are provided, create cloud_browser_params if final_profile_id is not None or 
final_proxy_country_code is not None or final_timeout is not None: cloud_params = CreateBrowserRequest( cloud_profile_id=final_profile_id, cloud_proxy_country_code=final_proxy_country_code, cloud_timeout=final_timeout, ) profile_kwargs['cloud_browser_params'] = cloud_params profile_kwargs['use_cloud'] = True # Handle backward compatibility: map cloud_browser to use_cloud if 'cloud_browser' in profile_kwargs: profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser') # If cloud_browser_params is set, force use_cloud=True if cloud_browser_params is not None: profile_kwargs['use_cloud'] = True # if is_local is False but executable_path is provided, set is_local to True if is_local is False and executable_path is not None: profile_kwargs['is_local'] = True # Only set is_local=True when cdp_url is missing if we're not using cloud browser # (cloud browser will provide cdp_url later) use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser') if not cdp_url and not use_cloud: profile_kwargs['is_local'] = True # Create browser profile from direct parameters or use provided one if browser_profile is not None: # Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence) merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs} resolved_browser_profile = BrowserProfile(**merged_kwargs) else: resolved_browser_profile = BrowserProfile(**profile_kwargs) # Initialize the Pydantic model super().__init__( id=id or str(uuid7str()), browser_profile=resolved_browser_profile, ) def from_system_chrome(cls, profile_directory: str | None, **kwargs) -> Self: ... def list_chrome_profiles(cls) -> list[dict[str, str]]: ... def cdp_url(self) -> str | None: ... def is_local(self) -> bool: ... def is_cdp_connected(self) -> bool: ... async def wait_if_captcha_solving(self, timeout: float | None) -> 'CaptchaWaitResult | None': ... def is_reconnecting(self) -> bool: ... def cloud_browser(self) -> bool: ... 
def demo_mode(self) -> 'DemoMode | None': ... def logger(self) -> Any: ... def _id_for_logs(self) -> str: ... def _tab_id_for_logs(self) -> str: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... async def reset(self) -> None: ... def model_post_init(self, __context) -> None: ... async def start(self) -> None: ... async def kill(self) -> None: ... async def stop(self) -> None: ... async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]: ... async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None: ... async def _navigate_and_wait(self, url: str, target_id: str, timeout: float | None, wait_until: str) -> None: ... async def on_SwitchTabEvent(self, event: SwitchTabEvent) -> TargetID: ... async def on_CloseTabEvent(self, event: CloseTabEvent) -> None: ... async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ... async def on_TabClosedEvent(self, event: TabClosedEvent) -> None: ... async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None: ... async def on_FileDownloadedEvent(self, event: FileDownloadedEvent) -> None: ... async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None: ... def cdp_client(self) -> CDPClient: ... async def new_page(self, url: str | None) -> 'Page': ... async def get_current_page(self) -> 'Page | None': ... async def must_get_current_page(self) -> 'Page': ... async def get_pages(self) -> list['Page']: ... def get_focused_target(self) -> 'Target | None': ... def get_page_targets(self) -> list['Target']: ... async def close_page(self, page: 'Union[Page, str]') -> None: ... async def cookies(self) -> list['Cookie']: ... async def clear_cookies(self) -> None: ... async def export_storage_state(self, output_path: str | Path | None) -> dict[str, Any]: ... async def get_or_create_cdp_session(self, target_id: TargetID | None, focus: bool) -> CDPSession: ... 
async def set_extra_headers(self, headers: dict[str, str], target_id: TargetID | None) -> None: ... async def get_browser_state_summary(self, include_screenshot: bool, cached: bool, include_recent_events: bool) -> BrowserStateSummary: ... async def get_state_as_text(self) -> str: ... async def attach_all_watchdogs(self) -> None: ... async def connect(self, cdp_url: str | None) -> Self: ... async def _setup_proxy_auth(self) -> None: ... async def reconnect(self) -> None: ... async def _auto_reconnect(self, max_attempts: int) -> None: ... def _attach_ws_drop_callback(self) -> None: ... async def get_tabs(self) -> list[TabInfo]: ... async def get_current_target_info(self) -> TargetInfo | None: ... async def get_current_page_url(self) -> str: ... async def get_current_page_title(self) -> str: ... async def navigate_to(self, url: str, new_tab: bool) -> None: ... async def get_dom_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... def update_cached_selector_map(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> None: ... async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... async def get_dom_element_at_coordinates(self, x: int, y: int) -> EnhancedDOMTreeNode | None: ... async def get_target_id_from_url(self, url: str) -> TargetID: ... async def get_most_recently_opened_target_id(self) -> TargetID: ... def is_file_input(self, element: Any) -> bool: ... async def get_selector_map(self) -> dict[int, EnhancedDOMTreeNode]: ... async def get_index_by_id(self, element_id: str) -> int | None: ... async def get_index_by_class(self, class_name: str) -> int | None: ... async def remove_highlights(self) -> None: ... async def get_element_coordinates(self, backend_node_id: int, cdp_session: CDPSession) -> DOMRect | None: ... async def highlight_interaction_element(self, node: 'EnhancedDOMTreeNode') -> None: ... async def highlight_coordinate_click(self, x: int, y: int) -> None: ... 
async def add_highlights(self, selector_map: dict[int, 'EnhancedDOMTreeNode']) -> None: ... async def _close_extension_options_pages(self) -> None: ... async def send_demo_mode_log(self, message: str, level: str, metadata: dict[str, Any] | None) -> None: ... def downloaded_files(self) -> list[str]: ... async def _cdp_get_all_pages(self, include_http: bool, include_about: bool, include_pages: bool, include_iframes: bool, include_workers: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool) -> list[TargetInfo]: ... async def _cdp_create_new_page(self, url: str, background: bool, new_window: bool) -> str: ... async def _cdp_close_page(self, target_id: TargetID) -> None: ... async def _cdp_get_cookies(self) -> list[Cookie]: ... async def _cdp_set_cookies(self, cookies: list[Cookie]) -> None: ... async def _cdp_clear_cookies(self) -> None: ... async def _cdp_grant_permissions(self, permissions: list[str], origin: str | None) -> None: ... async def _cdp_set_geolocation(self, latitude: float, longitude: float, accuracy: float) -> None: ... async def _cdp_clear_geolocation(self) -> None: ... async def _cdp_add_init_script(self, script: str) -> str: ... async def _cdp_remove_init_script(self, identifier: str) -> None: ... async def _cdp_set_viewport(self, width: int, height: int, device_scale_factor: float, mobile: bool, target_id: str | None) -> None: ... async def _cdp_get_origins(self) -> list[dict[str, Any]]: ... async def _cdp_get_storage_state(self) -> dict: ... async def _cdp_navigate(self, url: str, target_id: TargetID | None) -> None: ... def _is_valid_target(target_info: TargetInfo, include_http: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool, include_about: bool, include_iframes: bool, include_pages: bool, include_workers: bool) -> bool: ... async def get_all_frames(self) -> tuple[dict[str, dict], dict[str, str]]: ... 
async def _populate_frame_metadata(self, all_frames: dict[str, dict], target_sessions: dict[str, str]) -> None: ... async def find_frame_target(self, frame_id: str, all_frames: dict[str, dict] | None) -> dict | None: ... async def cdp_client_for_target(self, target_id: TargetID) -> CDPSession: ... async def cdp_client_for_frame(self, frame_id: str) -> CDPSession: ... async def cdp_client_for_node(self, node: EnhancedDOMTreeNode) -> CDPSession: ... async def take_screenshot(self, path: str | None, full_page: bool, format: str, quality: int | None, clip: dict | None) -> bytes: ... async def screenshot_element(self, selector: str, path: str | None, format: str, quality: int | None) -> bytes: ... async def _get_element_bounds(self, selector: str) -> dict | None: ... # Task: Write a Python async method `get_target_id_from_tab_id` for the class `BrowserSession` to get the full-length TargetID from the truncated 4-char tab_id using SessionManager. Parameters: tab_id: str Returns: TargetID
async def get_target_id_from_tab_id(self, tab_id: str) -> TargetID:
    """Get the full-length TargetID from the truncated 4-char tab_id using SessionManager."""
    manager = self.session_manager
    if not manager:
        raise RuntimeError('SessionManager not initialized')
    # Only consider targets whose id ends with the truncated tab id.
    matches = (t for t in manager.get_all_target_ids() if t.endswith(tab_id))
    for full_target_id in matches:
        if await manager.is_target_valid(full_target_id):
            return full_target_id
        # Stale match: Chrome should already have sent a detach event, so the
        # event listener will clean this target up — keep scanning.
        self.logger.debug(f'Found stale target {full_target_id}, skipping')
    raise ValueError(f'No TargetID found ending in tab_id=...{tab_id}')
function_complex
0
{"cognitive_complexity": 7, "loc": 14, "code_loc": 8, "docstring_loc": 1, "function_name": "get_target_id_from_tab_id", "class_name": "BrowserSession", "qualname": "BrowserSession.get_target_id_from_tab_id", "file_path": "browser_use/browser/session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/a2a/utils/test_task.py:TestExecute.test_emits_completed_event
# Context: from unittest.mock import AsyncMock, MagicMock, patch import pytest from crewai.a2a.utils.task import cancel, cancellable, execute def mock_agent() -> MagicMock: ... def mock_task(mock_context: MagicMock) -> MagicMock: ... def mock_context() -> MagicMock: ... def mock_event_queue() -> AsyncMock: ... async def clear_cache(mock_context: MagicMock) -> None: ... class TestCancellableDecorator: ... class TestCancel: ... class TestExecuteAndCancelIntegration: ... class TestExecute: async def test_successful_execution(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_started_event(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_failed_event_on_exception(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_canceled_event_on_cancellation(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... # Task: Write a Python test method `test_emits_completed_event` in test class `TestExecute` to execute emits A2AServerTaskCompletedEvent on success. Module under test: __future__, typing, a2a.server.agent_execution
async def test_emits_completed_event( self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock, ) -> None: """Execute emits A2AServerTaskCompletedEvent on success.""" with ( patch("crewai.a2a.utils.task.Task", return_value=mock_task), patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus, ): await execute(mock_agent, mock_context, mock_event_queue) second_call = mock_bus.emit.call_args_list[1] event = second_call[0][1] assert event.type == "a2a_server_task_completed" assert event.task_id == mock_context.task_id assert event.result == "Task completed successfully"
test
0
{"function_name": "test_emits_completed_event", "class_name": "TestExecute", "qualname": "TestExecute.test_emits_completed_event", "file_path": "lib/crewai/tests/a2a/utils/test_task.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["__future__", "typing", "a2a.server.agent_execution", "a2a.server.events", "a2a.types"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/test_default_cluster_autoscaler_v2.py:TestClusterAutoscaling.test_get_node_resource_spec_and_count_from_zero
# Context: from unittest.mock import MagicMock, patch from ray.core.generated import autoscaler_pb2 from ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2 import ( DefaultClusterAutoscalerV2, _get_node_resource_spec_and_count, _NodeResourceSpec, ) class StubUtilizationGauge(ResourceUtilizationGauge): ... class TestClusterAutoscaling: def setup_class(self): ... def teardown_class(self): ... def test_get_node_resource_spec_and_count(self): ... def test_try_scale_up_cluster(self, cpu_util, gpu_util, mem_util): ... def test_try_scale_up_cluster_from_zero(self): ... def test_low_utilization_sends_current_allocation(self): ... def test_get_node_resource_spec_and_count_skips_max_count_zero(self): ... def test_get_node_resource_spec_and_count_missing_all_resources(self): ... def test_try_scale_up_respects_resource_limits(self, resource_limits, node_spec, existing_nodes, scale_up_increment, expected_nodes): ... def test_try_scale_up_respects_resource_limits_heterogeneous_nodes(self): ... def test_try_scale_up_existing_nodes_prioritized_over_delta(self): ... def test_try_scale_up_logs_info_message(self, propagate_logs, caplog): ... # Task: Write a Python test method `test_get_node_resource_spec_and_count_from_zero` in test class `TestClusterAutoscaling` to test that get_node_resource_spec_and_count can discover node types. Module under test: ray.core.generated, ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2, ray.data._internal.cluster_autoscaler.fake_autoscaling_coordinator
def test_get_node_resource_spec_and_count_from_zero(self): """Test that get_node_resource_spec_and_count can discover node types from cluster config even when there are zero worker nodes.""" # Simulate a cluster with only head node (no worker nodes) node_table = [ { "Resources": self._head_node, "Alive": True, }, ] # Create a mock cluster config with 2 worker node types cluster_config = autoscaler_pb2.ClusterConfig() # Node type 1: 4 CPU, 0 GPU, 1000 memory node_group_config1 = autoscaler_pb2.NodeGroupConfig() node_group_config1.resources["CPU"] = 4 node_group_config1.resources["memory"] = 1000 node_group_config1.max_count = 10 cluster_config.node_group_configs.append(node_group_config1) # Node type 2: 8 CPU, 2 GPU, 2000 memory node_group_config2 = autoscaler_pb2.NodeGroupConfig() node_group_config2.resources["CPU"] = 8 node_group_config2.resources["GPU"] = 2 node_group_config2.resources["memory"] = 2000 node_group_config2.max_count = 5 cluster_config.node_group_configs.append(node_group_config2) expected = { _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000): 0, _NodeResourceSpec.of(cpu=8, gpu=2, mem=2000): 0, } with patch("ray.nodes", return_value=node_table): with patch( "ray._private.state.state.get_cluster_config", return_value=cluster_config, ): result = _get_node_resource_spec_and_count() assert result == expected
test
0
{"function_name": "test_get_node_resource_spec_and_count_from_zero", "class_name": "TestClusterAutoscaling", "qualname": "TestClusterAutoscaling.test_get_node_resource_spec_and_count_from_zero", "file_path": "python/ray/data/tests/test_default_cluster_autoscaler_v2.py", "repo_id": "ray-project/ray", "loc": 41, "tested_modules": ["ray.core.generated", "ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2", "ray.data._internal.cluster_autoscaler.fake_autoscaling_coordinator", "ray.data._internal.cluster_autoscaler.resource_utilization_gauge", "ray.data._internal.execution.interfaces.execution_options"], "has_docstring": true, "runnable_level": "class_runnable"}
ccxt/ccxt:python/ccxt/static_dependencies/bip/utils/crypto/blake2.py:Blake2b.QuickDigest
# Context: import hashlib from typing import Union from ..misc import AlgoUtils class _Blake2bWithSpecificSize(ABC): ... class Blake2b32(_Blake2bWithSpecificSize): ... class Blake2b40(_Blake2bWithSpecificSize): ... class Blake2b160(_Blake2bWithSpecificSize): ... class Blake2b224(_Blake2bWithSpecificSize): ... class Blake2b256(_Blake2bWithSpecificSize): ... class Blake2b512(_Blake2bWithSpecificSize): ... class Blake2b: # Task: Write a Python method `QuickDigest` for the class `Blake2b` to compute the digest (quick version). Parameters: data: Union[bytes, str], digest_size: int, key: Union[bytes, str], salt: Union[bytes, str] Returns: bytes
def QuickDigest(data: Union[bytes, str], digest_size: int, key: Union[bytes, str] = b"", salt: Union[bytes, str] = b"") -> bytes: """ Compute the digest (quick version). Args: data (str or bytes) : Data digest_size (int) : Digest size key ((str or bytes, optional) : Key (default: empty) salt ((str or bytes, optional): Salt (default: empty) Returns: bytes: Computed digest """ return hashlib.blake2b(AlgoUtils.Encode(data), digest_size=digest_size, key=AlgoUtils.Encode(key), salt=AlgoUtils.Encode(salt)).digest()
function_simple
1
{"cognitive_complexity": 0, "loc": 20, "code_loc": 4, "docstring_loc": 12, "function_name": "QuickDigest", "class_name": "Blake2b", "qualname": "Blake2b.QuickDigest", "file_path": "python/ccxt/static_dependencies/bip/utils/crypto/blake2.py", "repo_id": "ccxt/ccxt", "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py:TestCollectCodeExecutionToolIds.test_no_code_execution_calls
# Context: from langchain_anthropic.chat_models import ( ChatAnthropic, _collect_code_execution_tool_ids, _is_code_execution_related_block, ) class FakeToolCallingModel(BaseChatModel): ... def test_anthropic_prompt_caching_middleware_initialization() -> None: ... def test_anthropic_prompt_caching_middleware_unsupported_model() -> None: ... async def test_anthropic_prompt_caching_middleware_async() -> None: ... async def test_anthropic_prompt_caching_middleware_async_unsupported_model() -> None: ... async def test_anthropic_prompt_caching_middleware_async_min_messages() -> None: ... async def test_anthropic_prompt_caching_middleware_async_with_system_prompt() -> None: ... async def test_anthropic_prompt_caching_middleware_async_default_values() -> None: ... class TestIsCodeExecutionRelatedBlock: ... class TestCollectCodeExecutionToolIds: def test_empty_messages(self) -> None: ... def test_single_code_execution_call(self) -> None: ... def test_multiple_code_execution_calls(self) -> None: ... def test_future_code_execution_version(self) -> None: ... def test_ignores_user_messages(self) -> None: ... def test_handles_string_content(self) -> None: ... # Task: Write a Python test method `test_no_code_execution_calls` in test class `TestCollectCodeExecutionToolIds` to test messages without any code_execution calls. Module under test: typing, langchain.agents.middleware.types, langchain_core.callbacks
def test_no_code_execution_calls(self) -> None: """Test messages without any code_execution calls.""" messages = [ { "role": "user", "content": [{"type": "text", "text": "Hello"}], }, { "role": "assistant", "content": [ { "type": "tool_use", "id": "toolu_regular", "name": "get_weather", "input": {"location": "NYC"}, } ], }, ] result = _collect_code_execution_tool_ids(messages) assert result == set()
test
1
{"function_name": "test_no_code_execution_calls", "class_name": "TestCollectCodeExecutionToolIds", "qualname": "TestCollectCodeExecutionToolIds.test_no_code_execution_calls", "file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py", "repo_id": "langchain-ai/langchain", "loc": 21, "tested_modules": ["typing", "langchain.agents.middleware.types", "langchain_core.callbacks", "langchain_core.language_models", "langchain_core.messages"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py:TestFootsies.test_default_supress_output_mode
# Context: import os import time from pathlib import Path def _create_env(config_overrides): ... def capture_stdout_stderr(): ... class TestFootsies(unittest.TestCase): def test_enable_output_mode(self): ... # Task: Write a Python test method `test_default_supress_output_mode` in test class `TestFootsies` to verify the behavior of `default_supress_output_mode`. Module under test: contextlib, pathlib, ray.rllib.env
def test_default_supress_output_mode(self): with capture_stdout_stderr() as log_path: env = _create_env({}) time.sleep(2) # Give Unity time to write output env.close() # Give a bit more time for any buffered output to be written time.sleep(0.5) # Read the captured output with open(log_path, "r") as f: captured_output = f.read() assert ( "`log_unity_output` not set in environment config, not logging output by default" in captured_output ) assert "[UnityMemory]" not in captured_output # Clean up if Path(log_path).exists(): os.unlink(log_path)
test
0
{"function_name": "test_default_supress_output_mode", "class_name": "TestFootsies", "qualname": "TestFootsies.test_default_supress_output_mode", "file_path": "rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py", "repo_id": "ray-project/ray", "loc": 21, "tested_modules": ["contextlib", "pathlib", "ray.rllib.env", "ray.rllib.examples.envs.classes.multi_agent.footsies.footsies_env"], "has_docstring": false, "runnable_level": "file_runnable"}
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py:HunyuanVideo15ImageToVideoPipeline.prepare_cond_latents_and_mask
# Context: import PIL import torch def format_text_input(prompt: list[str], system_message: str) -> list[dict[str, Any]]: ... def extract_glyph_texts(prompt: str) -> list[str]: ... def retrieve_latents(encoder_output: torch.Tensor, generator: torch.Generator | None, sample_mode: str): ... def retrieve_timesteps(scheduler, num_inference_steps: int | None, device: str | torch.device | None, timesteps: list[int] | None, sigmas: list[float] | None, **kwargs): ... class HunyuanVideo15ImageToVideoPipeline(DiffusionPipeline): model_cpu_offload_seq = "image_encoder->text_encoder->transformer->vae" def __init__( self, text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, transformer: HunyuanVideo15Transformer3DModel, vae: AutoencoderKLHunyuanVideo15, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: T5EncoderModel, tokenizer_2: ByT5Tokenizer, guider: ClassifierFreeGuidance, image_encoder: SiglipVisionModel, feature_extractor: SiglipImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, guider=guider, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 16 self.video_processor = HunyuanVideo15ImageProcessor( vae_scale_factor=self.vae_scale_factor_spatial, do_resize=False, do_convert_rgb=True ) self.target_size = self.transformer.config.target_size if getattr(self, "transformer", None) else 640 self.vision_states_dim = ( self.transformer.config.image_embed_dim if getattr(self, "transformer", None) else 1152 ) self.num_channels_latents = self.vae.config.latent_channels if hasattr(self, "vae") else 32 # fmt: off self.system_message = "You are a helpful assistant. 
Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video." # fmt: on self.prompt_template_encode_start_idx = 108 self.tokenizer_max_length = 1000 self.tokenizer_2_max_length = 256 self.vision_num_semantic_tokens = 729 def _get_mllm_prompt_embeds(text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, prompt: str | list[str], device: torch.device, tokenizer_max_length: int, num_hidden_layers_to_skip: int, system_message: str, crop_start: int) -> tuple[torch.Tensor, torch.Tensor]: ... def _get_byt5_prompt_embeds(tokenizer: ByT5Tokenizer, text_encoder: T5EncoderModel, prompt: str | list[str], device: torch.device, tokenizer_max_length: int): ... def _get_image_latents(vae: AutoencoderKLHunyuanVideo15, image_processor: HunyuanVideo15ImageProcessor, image: PIL.Image.Image, height: int, width: int, device: torch.device) -> torch.Tensor: ... def _get_image_embeds(image_encoder: SiglipVisionModel, feature_extractor: SiglipImageProcessor, image: PIL.Image.Image, device: torch.device) -> torch.Tensor: ... def encode_image(self, image: PIL.Image.Image, batch_size: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor: ... def encode_prompt(self, prompt: str | list[str], device: torch.device | None, dtype: torch.dtype | None, batch_size: int, num_videos_per_prompt: int, prompt_embeds: torch.Tensor | None, prompt_embeds_mask: torch.Tensor | None, prompt_embeds_2: torch.Tensor | None, prompt_embeds_mask_2: torch.Tensor | None): ... 
def check_inputs(self, prompt, image: PIL.Image.Image, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_embeds_mask, negative_prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2, negative_prompt_embeds_2, negative_prompt_embeds_mask_2): ... def prepare_latents(self, batch_size: int, num_channels_latents: int, height: int, width: int, num_frames: int, dtype: torch.dtype | None, device: torch.device | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None) -> torch.Tensor: ... def num_timesteps(self): ... def attention_kwargs(self): ... def current_timestep(self): ... def interrupt(self): ... def __call__(self, image: PIL.Image.Image, prompt: str | list[str], negative_prompt: str | list[str], num_frames: int, num_inference_steps: int, sigmas: list[float], num_videos_per_prompt: int | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None, prompt_embeds: torch.Tensor | None, prompt_embeds_mask: torch.Tensor | None, negative_prompt_embeds: torch.Tensor | None, negative_prompt_embeds_mask: torch.Tensor | None, prompt_embeds_2: torch.Tensor | None, prompt_embeds_mask_2: torch.Tensor | None, negative_prompt_embeds_2: torch.Tensor | None, negative_prompt_embeds_mask_2: torch.Tensor | None, output_type: str | None, return_dict: bool, attention_kwargs: dict[str, Any] | None): ... # Task: Write a Python method `prepare_cond_latents_and_mask` for the class `HunyuanVideo15ImageToVideoPipeline` to prepare conditional latents and mask for t2v generation. Parameters: latents: torch.Tensor, image: PIL.Image.Image, batch_size: int, height: int, width: int, dtype: torch.dtype, device: torch.device
def prepare_cond_latents_and_mask( self, latents: torch.Tensor, image: PIL.Image.Image, batch_size: int, height: int, width: int, dtype: torch.dtype, device: torch.device, ): """ Prepare conditional latents and mask for t2v generation. Args: latents: Main latents tensor (B, C, F, H, W) Returns: tuple: (cond_latents_concat, mask_concat) - both are zero tensors for t2v """ batch, channels, frames, height, width = latents.shape image_latents = self._get_image_latents( vae=self.vae, image_processor=self.video_processor, image=image, height=height, width=width, device=device, ) latent_condition = image_latents.repeat(batch_size, 1, frames, 1, 1) latent_condition[:, :, 1:, :, :] = 0 latent_condition = latent_condition.to(device=device, dtype=dtype) latent_mask = torch.zeros(batch, 1, frames, height, width, dtype=dtype, device=device) latent_mask[:, :, 0, :, :] = 1.0 return latent_condition, latent_mask
function_simple
1
{"cognitive_complexity": 0, "loc": 39, "code_loc": 15, "docstring_loc": 9, "function_name": "prepare_cond_latents_and_mask", "class_name": "HunyuanVideo15ImageToVideoPipeline", "qualname": "HunyuanVideo15ImageToVideoPipeline.prepare_cond_latents_and_mask", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/nvfp4_utils.py:prepare_weights_for_nvfp4_flashinfer_trtllm
# Context: import torch from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a class NvFp4LinearBackend(Enum): ... def select_nvfp4_linear_backend() -> NvFp4LinearBackend: ... def prepare_weights_for_nvfp4_cutlass(weight: torch.Tensor, weight_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, int]: ... def prepare_weights_for_nvfp4_fbgemm(weight: torch.Tensor, weight_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: ... def convert_to_nvfp4_linear_kernel_format(backend: NvFp4LinearBackend, layer: torch.nn.Module) -> None: ... def apply_nvfp4_linear(backend: NvFp4LinearBackend, layer: torch.nn.Module, x: torch.Tensor, bias: torch.Tensor | None) -> torch.Tensor: ... def swizzle_blockscale(scale: torch.Tensor) -> torch.Tensor: ... def cutlass_fp4_supported() -> bool: ... def pad_nvfp4_weight_for_cutlass(weight: torch.Tensor, alignment: int) -> tuple[torch.Tensor, int]: ... def pad_nvfp4_activation_for_cutlass(x_fp4: torch.Tensor, weights_padding_bytes: int) -> torch.Tensor: ... def slice_nvfp4_output(out: torch.Tensor, output_size: int) -> torch.Tensor: ... # Task: Write a Python function `prepare_weights_for_nvfp4_flashinfer_trtllm` to prepare weights and scales for FlashInfer TRTLLM FP4 GEMM. Parameters: weight: torch.Tensor, weight_scale: torch.Tensor Returns: tuple[torch.Tensor, torch.Tensor]
def prepare_weights_for_nvfp4_flashinfer_trtllm( weight: torch.Tensor, weight_scale: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: """Prepare weights and scales for FlashInfer TRTLLM FP4 GEMM.""" from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a epilogue_tile_m = 128 shuffled_weight = shuffle_matrix_a(weight.view(torch.uint8), epilogue_tile_m) shuffled_weight_scale = ( shuffle_matrix_sf_a(weight_scale.view(torch.uint8), epilogue_tile_m) .reshape(weight_scale.shape) .view(torch.float8_e4m3fn) ) return shuffled_weight, shuffled_weight_scale
function_simple
1
{"cognitive_complexity": 0, "loc": 16, "code_loc": 9, "docstring_loc": 1, "function_name": "prepare_weights_for_nvfp4_flashinfer_trtllm", "class_name": null, "qualname": "prepare_weights_for_nvfp4_flashinfer_trtllm", "file_path": "vllm/model_executor/layers/quantization/utils/nvfp4_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/_internal/execution/operators/hash_shuffle.py:ShuffleAggregation.is_compacting
Write a Python method `is_compacting` for the class `ShuffleAggregation` to returns whether this aggregation is capable of compacting partial.
def is_compacting(cls): """Returns whether this aggregation is capable of compacting partial partition's shards list. """ return False
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 1, "docstring_loc": 3, "function_name": "is_compacting", "class_name": "ShuffleAggregation", "qualname": "ShuffleAggregation.is_compacting", "file_path": "python/ray/data/_internal/execution/operators/hash_shuffle.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"}
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py:OffloadingConnectorScheduler.request_finished
# Context: from typing import Any from vllm.v1.request import Request class OffloadingOperationMetrics: ... class OffloadingConnectorStats(KVConnectorStats): ... class OffloadingConnectorMetadata(KVConnectorMetadata): ... class OffloadingConnector(KVConnectorBase_V1): ... class OffloadingConnectorWorker: ... class OffloadPromMetrics(KVConnectorPromMetrics): ... class OffloadingConnectorScheduler: def __init__(self, spec: OffloadingSpec): self.gpu_block_size = spec.gpu_block_size self.offloaded_block_size = spec.offloaded_block_size self.block_size_factor = self.offloaded_block_size // self.gpu_block_size self.manager: OffloadingManager = spec.get_manager() self._requests: dict[ReqId, Request] = {} # list of GPU block IDs per request self._request_block_ids: dict[ReqId, list[int]] = {} # requests to load for the current scheduler step self._reqs_to_load: dict[ReqId, TransferSpec] = {} # request blocks are stored in order # index of next block (of size offloaded_block_size) to offload self._next_stored_block_idx: dict[ReqId, int] = {} # if GPU prefix caching is enabled, # track loaded blocks to avoid redundant loads self._blocks_being_loaded: set[BlockHash] | None = ( set() if spec.vllm_config.cache_config.enable_prefix_caching else None ) # request ID -> set(block hashes being stored/load) self._reqs_being_stored = defaultdict[ReqId, set[BlockHash]](set) self._reqs_being_loaded = defaultdict[ReqId, set[BlockHash]](set) def _get_block_hashes(self, req: Request, start_idx: int, end_idx: int | None) -> Iterable[BlockHash]: ... def get_num_new_matched_tokens(self, request: Request, num_computed_tokens: int) -> tuple[int | None, bool]: ... def update_state_after_alloc(self, request: Request, blocks: KVCacheBlocks, num_external_tokens: int): ... def _get_reqs_to_store(self, scheduler_output: SchedulerOutput): ... def build_connector_meta(self, scheduler_output: SchedulerOutput) -> KVConnectorMetadata: ... 
def update_connector_output(self, connector_output: KVConnectorOutput): ... def take_events(self) -> Iterable[KVCacheEvent]: ... # Task: Write a Python method `request_finished` for the class `OffloadingConnectorScheduler` to called when a request has finished, before its blocks are freed. Parameters: request: Request, block_ids: list[int] Returns: tuple[bool, dict[str, Any] | None]
def request_finished( self, request: Request, block_ids: list[int], ) -> tuple[bool, dict[str, Any] | None]: """ Called when a request has finished, before its blocks are freed. Returns: True if the request is being saved/sent asynchronously and blocks should not be freed until the request_id is returned from get_finished(). Optional KVTransferParams to be included in the request outputs returned by the engine. """ req_id = request.request_id self._requests.pop(req_id, None) self._request_block_ids.pop(req_id, None) self._next_stored_block_idx.pop(req_id, None) request_being_stored = req_id in self._reqs_being_stored return request_being_stored, None
function_simple
1
{"cognitive_complexity": 0, "loc": 22, "code_loc": 6, "docstring_loc": 10, "function_name": "request_finished", "class_name": "OffloadingConnectorScheduler", "qualname": "OffloadingConnectorScheduler.request_finished", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/redis/tests/unit/redis/triggers/test_redis_await_message.py:TestAwaitMessageTrigger.test_trigger_serialization
# Context: from airflow.providers.redis.triggers.redis_await_message import AwaitMessageTrigger class TestAwaitMessageTrigger: async def test_trigger_run_succeed(self, mock_redis_conn): ... async def test_trigger_run_succeed_with_bytes(self, mock_redis_conn): ... async def test_trigger_run_fail(self, mock_redis_conn): ... # Task: Write a Python test method `test_trigger_serialization` in test class `TestAwaitMessageTrigger` to verify the behavior of `trigger_serialization`. Module under test: __future__, airflow.providers.redis.triggers.redis_await_message
def test_trigger_serialization(self): trigger = AwaitMessageTrigger( channels=["test_channel"], redis_conn_id="redis_default", poll_interval=30, ) assert isinstance(trigger, AwaitMessageTrigger) classpath, kwargs = trigger.serialize() assert classpath == "airflow.providers.redis.triggers.redis_await_message.AwaitMessageTrigger" assert kwargs == dict( channels=["test_channel"], redis_conn_id="redis_default", poll_interval=30, )
test
1
{"function_name": "test_trigger_serialization", "class_name": "TestAwaitMessageTrigger", "qualname": "TestAwaitMessageTrigger.test_trigger_serialization", "file_path": "providers/redis/tests/unit/redis/triggers/test_redis_await_message.py", "repo_id": "apache/airflow", "loc": 17, "tested_modules": ["__future__", "airflow.providers.redis.triggers.redis_await_message"], "has_docstring": false, "runnable_level": "project_runnable"}
infiniflow/ragflow:common/data_source/utils.py:make_paginated_slack_api_call
# Context: from collections.abc import Callable, Generator, Iterator, Mapping, Sequence from typing import IO, Any, Generic, Iterable, Optional, Protocol, TypeVar, cast from slack_sdk.web import SlackResponse def datetime_from_string(datetime_string: str) -> datetime: ... def is_valid_image_type(mime_type: str) -> bool: ... def _handle_http_error(e: requests.HTTPError, attempt: int) -> int: ... def update_param_in_path(path: str, param: str, value: str) -> str: ... def build_confluence_document_id(base_url: str, content_url: str, is_cloud: bool) -> str: ... def get_single_param_from_url(url: str, param: str) -> str | None: ... def get_start_param_from_url(url: str) -> int: ... def wrap_request_to_handle_ratelimiting(request_fn: R, default_wait_time_sec: int, max_waits: int) -> R: ... class _RateLimitedRequest: ... def create_s3_client(bucket_type: BlobType, credentials: dict[str, Any], european_residency: bool) -> S3Client: ... def detect_bucket_region(s3_client: S3Client, bucket_name: str) -> str | None: ... def download_object(s3_client: S3Client, bucket_name: str, key: str, size_threshold: int | None) -> bytes | None: ... def read_stream_with_limit(body: Any, key: str, size_threshold: int) -> bytes | None: ... def _extract_onyx_metadata(line: str) -> dict | None: ... def read_text_file(file: IO, encoding: str, errors: str, ignore_onyx_metadata: bool) -> tuple[str, dict]: ... def get_blob_link(bucket_type: BlobType, s3_client: S3Client, bucket_name: str, key: str, bucket_region: str | None) -> str: ... def extract_size_bytes(obj: Mapping[str, Any]) -> int | None: ... def get_file_ext(file_name: str) -> str: ... def is_accepted_file_ext(file_ext: str, extension_type: OnyxExtensionType) -> bool: ... def detect_encoding(file: IO[bytes]) -> str: ... def get_markitdown_converter(): ... def to_bytesio(stream: IO[bytes]) -> BytesIO: ... def get_base_url(token: str) -> str: ... def get_message_link(event: dict, client: WebClient, channel_id: str) -> str: ... 
def make_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> SlackResponse: ... def _make_slack_api_call_paginated(call: Callable[..., SlackResponse]) -> Callable[..., Generator[dict[str, Any], None, None]]: ... def is_atlassian_date_error(e: Exception) -> bool: ... def expert_info_from_slack_id(user_id: str | None, client: WebClient, user_cache: dict[str, BasicExpertInfo | None]) -> BasicExpertInfo | None: ... class SlackTextCleaner: ... def is_mail_service_disabled_error(error: HttpError) -> bool: ... def build_time_range_query(time_range_start: SecondsSinceUnixEpoch | None, time_range_end: SecondsSinceUnixEpoch | None) -> str | None: ... def clean_email_and_extract_name(email: str) -> tuple[str, str | None]: ... def get_message_body(payload: dict[str, Any]) -> str: ... def time_str_to_utc(time_str: str): ... def gmail_time_str_to_utc(time_str: str): ... def batch_generator(items: Iterable[T], batch_size: int, pre_batch_yield: Callable[[list[T]], None] | None) -> Generator[list[T], None, None]: ... def fetch_notion_data(url: str, headers: dict[str, str], method: str, json_data: Optional[dict]) -> dict[str, Any]: ... def properties_to_str(properties: dict[str, Any]) -> str: ... def filter_pages_by_time(pages: list[dict[str, Any]], start: float, end: float, filter_field: str) -> list[dict[str, Any]]: ... def _load_all_docs(connector: CheckpointedConnector[CT], load: LoadFunction) -> list[Document]: ... def load_all_docs_from_checkpoint_connector(connector: CheckpointedConnector[CT], start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> list[Document]: ... def is_atlassian_cloud_url(url: str) -> bool: ... def get_cloudId(base_url: str) -> str: ... def scoped_url(url: str, product: str) -> str: ... def process_confluence_user_profiles_override(confluence_user_email_override: list[dict[str, str]]) -> list[ConfluenceUser]: ... def confluence_refresh_tokens(client_id: str, client_secret: str, cloud_id: str, refresh_token: str) -> dict[str, Any]: ... 
class TimeoutThread(threading.Thread, Generic[R]): ... def run_with_timeout(timeout: float, func: Callable[..., R], *args, **kwargs) -> R: ... def validate_attachment_filetype(attachment: dict[str, Any]) -> bool: ... class CallableProtocol(Protocol): ... def run_functions_tuples_in_parallel(functions_with_args: Sequence[tuple[CallableProtocol, tuple[Any, ...]]], allow_failures: bool, max_workers: int | None) -> list[Any]: ... def _next_or_none(ind: int, gen: Iterator[R]) -> tuple[int, R | None]: ... def parallel_yield(gens: list[Iterator[R]], max_workers: int) -> Iterator[R]: ... def sanitize_filename(name: str, extension: str) -> str: ... class _RateLimitDecorator: ... def retry_builder(tries: int, delay: float, max_delay: float | None, backoff: float, jitter: tuple[float, float] | float, exceptions: type[Exception] | tuple[type[Exception], ...]) -> Callable[[F], F]: ... # Task: Write a Python function `make_paginated_slack_api_call` to make paginated Slack API call. Parameters: call: Callable[..., SlackResponse] Returns: Generator[dict[str, Any], None, None]
def make_paginated_slack_api_call(call: Callable[..., SlackResponse], **kwargs: Any) -> Generator[dict[str, Any], None, None]: """Make paginated Slack API call""" return _make_slack_api_call_paginated(call)(**kwargs)
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "make_paginated_slack_api_call", "class_name": null, "qualname": "make_paginated_slack_api_call", "file_path": "common/data_source/utils.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py:test_serve_deployment_invalid_method
# Context: import pytest from ray.llm._internal.batch.stages.serve_deployment_stage import ( ServeDeploymentStageUDF, ) from ray.serve.llm.openai_api_models import ChatCompletionRequest, CompletionRequest def mock_serve_deployment_handle(): ... async def test_serve_deployment_udf_methods(mock_serve_deployment_handle, method, test_data): ... async def test_serve_deployment_missing_dtype(mock_serve_deployment_handle, dtype_mapping): ... async def test_serve_udf_default_raises_on_error(mock_serve_deployment_handle): ... async def test_serve_udf_continue_on_error_yields_error_row(mock_serve_deployment_handle): ... async def test_serve_udf_mixed_success_and_error(mock_serve_deployment_handle): ... async def test_serve_udf_fatal_errors_always_propagate(mock_serve_deployment_handle, fatal_error): ... async def test_serve_udf_unknown_errors_propagate(mock_serve_deployment_handle): ... async def test_serve_udf_success_with_continue_on_error_includes_none_error(mock_serve_deployment_handle): ... # Task: Write a Python test function `test_serve_deployment_invalid_method` to test that invalid method raises error at runtime. Module under test: ray.exceptions, ray.llm._internal.batch.stages.serve_deployment_stage, ray.serve._private.common
async def test_serve_deployment_invalid_method(mock_serve_deployment_handle): """Test that invalid method raises error at runtime.""" # Set up the mock to simulate a method that doesn't exist mock_serve_deployment_handle.invalid_method = None udf = ServeDeploymentStageUDF( data_column="__data", expected_input_keys=["method", "request_kwargs"], deployment_name="test_deployment", app_name="test_app", dtype_mapping={ "CompletionRequest": CompletionRequest, }, ) batch = { "__data": [ { "method": "invalid_method", "dtype": "CompletionRequest", "request_kwargs": {"prompt": "Hello", "temperature": 0.7}, } ] } with pytest.raises( ValueError, match="Method invalid_method not found in the serve deployment." ): async for _ in udf(batch): pass
test
0
{"function_name": "test_serve_deployment_invalid_method", "class_name": null, "qualname": "test_serve_deployment_invalid_method", "file_path": "python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["ray.exceptions", "ray.llm._internal.batch.stages.serve_deployment_stage", "ray.serve._private.common", "ray.serve.exceptions", "ray.serve.llm.openai_api_models"], "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/util/data_batch_conversion.py:_unwrap_ndarray_object_type_if_needed
# Context: import numpy as np def _lazy_import_pandas(): ... class BatchFormat(str, Enum): ... def _convert_batch_type_to_pandas(data: DataBatchType, cast_tensor_columns: bool) -> 'pd.DataFrame': ... def _convert_pandas_to_batch_type(data: 'pd.DataFrame', type: BatchFormat, cast_tensor_columns: bool) -> DataBatchType: ... def _convert_batch_type_to_numpy(data: DataBatchType) -> Union[np.ndarray, Dict[str, np.ndarray]]: ... def _ndarray_to_column(arr: np.ndarray) -> Union['pd.Series', List[np.ndarray]]: ... def _cast_ndarray_columns_to_tensor_extension(df: 'pd.DataFrame') -> 'pd.DataFrame': ... def _cast_tensor_columns_to_ndarrays(df: 'pd.DataFrame') -> 'pd.DataFrame': ... # Task: Write a Python function `_unwrap_ndarray_object_type_if_needed` to unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single. Parameters: arr: np.ndarray Returns: np.ndarray
def _unwrap_ndarray_object_type_if_needed(arr: np.ndarray) -> np.ndarray: """Unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single contiguous ndarray, if needed/possible. """ if arr.dtype.type is np.object_: try: # Try to convert the NumPy ndarray to a non-object dtype. arr = np.array([np.asarray(v) for v in arr]) except Exception: # This may fail if the subndarrays are of heterogeneous shape pass return arr
function_simple
0
{"cognitive_complexity": 2, "loc": 12, "code_loc": 6, "docstring_loc": 3, "function_name": "_unwrap_ndarray_object_type_if_needed", "class_name": null, "qualname": "_unwrap_ndarray_object_type_if_needed", "file_path": "python/ray/data/util/data_batch_conversion.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
huggingface/transformers:src/transformers/masking_utils.py:sliding_window_causal_mask_function
# Context: from collections.abc import Callable def and_masks(*mask_functions) -> Callable: ... def or_masks(*mask_functions) -> Callable: ... def causal_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: ... def bidirectional_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: ... def sliding_window_overlay(sliding_window: int) -> Callable: ... def chunked_overlay(chunk_size: int, left_padding: torch.Tensor) -> Callable: ... def sliding_window_bidirectional_overlay(sliding_window: int) -> Callable: ... def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable: ... def chunked_causal_mask_function(chunk_size: int, left_padding: torch.Tensor) -> Callable: ... def padding_mask_function(padding_mask: torch.Tensor) -> Callable: ... def packed_sequence_mask_function(packed_sequence_mask: torch.Tensor) -> Callable: ... def add_offsets_to_mask_function(mask_function: Callable, q_offset: int, kv_offset: int) -> Callable: ... def prepare_padding_mask(attention_mask: torch.Tensor | None, kv_length: int, kv_offset: int) -> torch.Tensor | None: ... def _can_skip_causal_mask_xpu(padding_mask: torch.Tensor | None, query_length: int, kv_length: int, local_attention_size: int | None) -> bool: ... def _ignore_causal_mask_sdpa(padding_mask: torch.Tensor | None, query_length: int, kv_length: int, kv_offset: int, local_attention_size: int | None) -> bool: ... def _can_skip_bidirectional_mask_xpu(padding_mask: torch.Tensor | None, kv_length: int, local_attention_size: int | None) -> bool: ... def _ignore_bidirectional_mask_sdpa(padding_mask: torch.Tensor | None, kv_length: int, local_attention_size: int | None) -> bool: ... def _vmap_expansion_sdpa(mask_function: Callable) -> Callable: ... def _non_vmap_expansion_sdpa(batch_indices: torch.Tensor, head_indices: torch.Tensor, q_indices: torch.Tensor, kv_indices: torch.Tensor): ... 
def sdpa_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, local_size: int | None, allow_is_causal_skip: bool, allow_is_bidirectional_skip: bool, allow_torch_fix: bool, use_vmap: bool, **kwargs) -> torch.Tensor | None: ... def eager_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, dtype: torch.dtype, allow_is_bidirectional_skip: bool, use_vmap: bool, **kwargs) -> torch.Tensor: ... def flash_attention_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, **kwargs): ... def flex_attention_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, **kwargs) -> BlockMask: ... class AttentionMaskInterface(GeneralInterface): ... def find_packed_sequence_indices(position_ids: torch.Tensor) -> torch.Tensor | None: ... def _preprocess_mask_arguments(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | BlockMask | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, layer_idx: int | None) -> tuple[bool, torch.Tensor | BlockMask | None, int, int]: ... def create_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... 
def create_bidirectional_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, encoder_hidden_states: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_sliding_window_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_bidirectional_sliding_window_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_chunked_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_masks_for_generate(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None, **kwargs): ... def get_style(style): ... def tensor_to_mask_visual(original_tensor: torch.Tensor, grid_size, style) -> str: ... class AttentionMask(torch.Tensor): ... # Task: Write a Python function `sliding_window_causal_mask_function` to this return the mask_function function to create a sliding window mask. Parameters: sliding_window: int Returns: Callable
def sliding_window_causal_mask_function(sliding_window: int) -> Callable: """ This return the mask_function function to create a sliding window mask. """ return and_masks(sliding_window_overlay(sliding_window), causal_mask_function)
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 1, "docstring_loc": 3, "function_name": "sliding_window_causal_mask_function", "class_name": null, "qualname": "sliding_window_causal_mask_function", "file_path": "src/transformers/masking_utils.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py:SerperDevTool._make_api_request
# Context: import json import os from typing import Any, TypedDict import requests class KnowledgeGraph(TypedDict): ... class Sitelink(TypedDict): ... class OrganicResult(TypedDict): ... class PeopleAlsoAskResult(TypedDict): ... class RelatedSearchResult(TypedDict): ... class NewsResult(TypedDict): ... class SearchParameters(TypedDict): ... class FormattedResults(TypedDict): ... def _save_results_to_file(content: str) -> None: ... class SerperDevToolSchema(BaseModel): ... class SerperDevTool(BaseTool): def _get_search_url(self, search_type: str) -> str: ... def _process_knowledge_graph(kg: dict[str, Any]) -> KnowledgeGraph: ... def _process_organic_results(self, organic_results: list[dict[str, Any]]) -> list[OrganicResult]: ... def _process_people_also_ask(self, paa_results: list[dict[str, Any]]) -> list[PeopleAlsoAskResult]: ... def _process_related_searches(self, related_results: list[dict[str, Any]]) -> list[RelatedSearchResult]: ... def _process_news_results(self, news_results: list[dict[str, Any]]) -> list[NewsResult]: ... def _process_search_results(self, results: dict[str, Any], search_type: str) -> dict[str, Any]: ... def _run(self, **kwargs) -> FormattedResults: ... # Task: Write a Python method `_make_api_request` for the class `SerperDevTool` to make API request to Serper. Parameters: search_query: str, search_type: str Returns: dict[str, Any]
def _make_api_request(self, search_query: str, search_type: str) -> dict[str, Any]: """Make API request to Serper.""" search_url = self._get_search_url(search_type) payload = {"q": search_query, "num": self.n_results} if self.country != "": payload["gl"] = self.country if self.location != "": payload["location"] = self.location if self.locale != "": payload["hl"] = self.locale headers = { "X-API-KEY": os.environ["SERPER_API_KEY"], "content-type": "application/json", } response = None try: response = requests.post( search_url, headers=headers, json=payload, timeout=10 ) response.raise_for_status() results = response.json() if not results: logger.error("Empty response from Serper API") raise ValueError("Empty response from Serper API") return results except requests.exceptions.RequestException as e: error_msg = f"Error making request to Serper API: {e}" if response is not None and hasattr(response, "content"): error_msg += f"\nResponse content: {response.content.decode('utf-8', errors='replace')}" logger.error(error_msg) raise except json.JSONDecodeError as e: if response is not None and hasattr(response, "content"): logger.error(f"Error decoding JSON response: {e}") logger.error( f"Response content: {response.content.decode('utf-8', errors='replace')}" ) else: logger.error( f"Error decoding JSON response: {e} (No response content available)" ) raise
function_complex
0
{"cognitive_complexity": 14, "loc": 45, "code_loc": 40, "docstring_loc": 1, "function_name": "_make_api_request", "class_name": "SerperDevTool", "qualname": "SerperDevTool._make_api_request", "file_path": "lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:ci/ray_ci/test_ray_image.py:TestValidateInvalid.test_invalid_platform_for_ray_llm
# Context: import pytest from ci.ray_ci.ray_image import IMAGE_TYPE_CONFIG, RayImage, RayImageError class TestWandaImageName: ... class TestArchSuffix: ... class TestRepo: ... class TestVariationSuffix: ... class TestValidateValid: ... class TestImageTypeConfig: ... class TestValidateInvalid: def test_unknown_image_type(self): ... def test_invalid_python_for_ray_llm(self): ... def test_invalid_platform_for_ray(self): ... def test_invalid_architecture_for_ray_llm(self): ... # Task: Write a Python test method `test_invalid_platform_for_ray_llm` in test class `TestValidateInvalid` to verify the behavior of `invalid_platform_for_ray_llm`. Module under test: ci.ray_ci.configs, ci.ray_ci.docker_container, ci.ray_ci.ray_image
def test_invalid_platform_for_ray_llm(self): with pytest.raises(RayImageError, match="Invalid platform cpu for ray-llm"): RayImage("ray-llm", "3.11", "cpu").validate()
test
0
{"function_name": "test_invalid_platform_for_ray_llm", "class_name": "TestValidateInvalid", "qualname": "TestValidateInvalid.test_invalid_platform_for_ray_llm", "file_path": "ci/ray_ci/test_ray_image.py", "repo_id": "ray-project/ray", "loc": 3, "tested_modules": ["ci.ray_ci.configs", "ci.ray_ci.docker_container", "ci.ray_ci.ray_image"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/vaultgemma/test_modeling_vaultgemma.py:VaultGemmaIntegrationTest.test_export_static_cache
# Context: import pytest from packaging import version from transformers import ( AutoModelForCausalLM, AutoTokenizer, DynamicCache, is_torch_available, pipeline, ) from transformers.generation.configuration_utils import GenerationConfig from transformers.testing_utils import ( Expectations, cleanup, is_flash_attn_2_available, is_kernels_available, is_torch_xpu_available, require_torch, require_torch_accelerator, slow, torch_device, ) import torch from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM class VaultGemmaModelTester(CausalLMModelTester): ... class VaultGemmaModelTest(CausalLMModelTest, unittest.TestCase): ... class VaultGemmaIntegrationTest(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] def setUp(self): ... def tearDown(self): ... def test_model_bf16(self): ... def test_model_pipeline_bf16(self): ... def test_generation_beyond_sliding_window(self, attn_implementation: str): ... def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str): ... # Task: Write a Python test method `test_export_static_cache` in test class `VaultGemmaIntegrationTest` to verify the behavior of `export_static_cache`. Module under test: packaging, parameterized, transformers
def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.5.0"): self.skipTest(reason="This test requires torch >= 2.5 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) model_id = "google/vaultgemma-1b" tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETIONS = Expectations( { ("cuda", 8): ["Hello I am doing a project on a 1990 240sx. I have a 1"], } ) EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation() max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompts = ["Hello I am doing"] prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + export from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, 
max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
test
0
{"function_name": "test_export_static_cache", "class_name": "VaultGemmaIntegrationTest", "qualname": "VaultGemmaIntegrationTest.test_export_static_cache", "file_path": "tests/models/vaultgemma/test_modeling_vaultgemma.py", "repo_id": "huggingface/transformers", "loc": 60, "tested_modules": ["packaging", "parameterized", "transformers", "transformers.cache_utils", "transformers.generation.configuration_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/models/molmo2.py:Molmo2VisionBlock:class_doc
Write a class-level docstring for `Molmo2VisionBlock` (inherits from nn.Module) which has methods: `__init__`, `forward`.
Residual attention block used in Vision Transformer.
documentation
1
{"doc_type": "class", "class_name": "Molmo2VisionBlock", "file_path": "vllm/model_executor/models/molmo2.py", "repo_id": "vllm-project/vllm", "char_length": 52, "methods": ["__init__", "forward"]}
ray-project/ray:release/train_tests/benchmark/runner.py:TrainLoopRunner._train_epoch
# Context: import pprint class VanillaTorchRunner(TrainLoopRunner): ... class TrainLoopRunner: def __init__(self, factory: BenchmarkFactory): self.factory = factory self.benchmark_config = factory.benchmark_config self._setup() # Training progress state. self._train_batch_idx: int = 0 self._train_epoch_idx: int = 0 self._global_rows_processed_this_epoch: int = 0 # Performance metrics self._metrics = collections.defaultdict(lambda: Timer()) checkpoint = ray.train.get_checkpoint() if checkpoint: self._restore_from_checkpoint(checkpoint) def _setup(self): ... def _cleanup(self): ... def _train_step(self, train_dataloader): ... def _validate_step(self, val_dataloader): ... def _save_training_state(self, local_dir: str): ... def _load_training_state(self, local_dir: str): ... def _restore_from_checkpoint(self, checkpoint: ray.train.Checkpoint): ... def _wrap_dataloader(self, dataloader, train: bool): ... def _num_batches_to_skip(self) -> int: ... def _validate_epoch(self) -> Dict[str, float]: ... def _should_checkpoint_during_epoch(self) -> bool: ... def _should_validate_during_epoch(self) -> bool: ... def _should_log_metrics(self) -> bool: ... def _validate(self) -> Dict[str, float]: ... def _checkpoint(self, metrics: Optional[Dict[str, float]]): ... def _load_checkpoint(self, local_dir: str): ... def _save_checkpoint(self, local_dir: str): ... def _report_checkpoint(self, metrics, checkpoint): ... def run(self): ... def get_metrics(self, dataset_creation_time: float) -> Dict[str, float]: ... # Task: Write a Python method `_train_epoch` for the class `TrainLoopRunner` to subclasses can override the entrire `_train_epoch` method for more training.
def _train_epoch(self): """Subclasses can override the entrire `_train_epoch` method for more training logic customization.""" if ray.train.get_context().get_world_rank() == 0: logger.info(f"Training starting @ epoch={self._train_epoch_idx}") train_dataloader = self.factory.get_train_dataloader() train_dataloader = self._wrap_dataloader(train_dataloader, train=True) # Skip through batches if we restored to a middle of the epoch. # TODO: Compare this baseline to the data checkpointing approach once we have it. if self._num_batches_to_skip: if ray.train.get_context().get_world_rank() == 0: logger.info(f"Skipping {self._num_batches_to_skip} batches...") for _ in range(self._num_batches_to_skip): with self._metrics["train/iter_skip_batch"].timer(): next(train_dataloader) for batch in train_dataloader: with self._metrics["train/step"].timer(): if not self.benchmark_config.skip_train_step: self._train_step(batch) # TODO: This is slightly off if the last batch is a partial batch (if drop_last=False) global_batch_size = ( self.benchmark_config.dataloader_config.train_batch_size * ray.train.get_context().get_world_size() ) self._metrics["train/rows_processed"].add(global_batch_size) self._global_rows_processed_this_epoch += global_batch_size if self._should_checkpoint_during_epoch(): self._checkpoint() if self._should_validate_during_epoch(): validation_metrics = self._validate() self._checkpoint(validation_metrics) if self._should_log_metrics(): logger.info(pprint.pformat(self.get_metrics(), indent=2)) self._train_epoch_idx += 1 self._train_batch_idx = 0 self._global_rows_processed_this_epoch = 0
function_complex
0
{"cognitive_complexity": 16, "loc": 46, "code_loc": 30, "docstring_loc": 2, "function_name": "_train_epoch", "class_name": "TrainLoopRunner", "qualname": "TrainLoopRunner._train_epoch", "file_path": "release/train_tests/benchmark/runner.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
666ghj/BettaFish:MediaEngine/state/state.py:State.save_to_file
# Context: class Search: ... class Research: ... class Paragraph: ... class State: def add_paragraph(self, title: str, content: str) -> int: ... def get_paragraph(self, index: int) -> Optional[Paragraph]: ... def get_completed_paragraphs_count(self) -> int: ... def get_total_paragraphs_count(self) -> int: ... def is_all_paragraphs_completed(self) -> bool: ... def mark_completed(self): ... def update_timestamp(self): ... def get_progress_summary(self) -> Dict[str, Any]: ... def to_dict(self) -> Dict[str, Any]: ... def to_json(self, indent: int) -> str: ... def from_dict(cls, data: Dict[str, Any]) -> 'State': ... def from_json(cls, json_str: str) -> 'State': ... def load_from_file(cls, filepath: str) -> 'State': ... # Task: Write a Python method `save_to_file` for the class `State` to 保存状态到文件. Parameters: filepath: str
def save_to_file(self, filepath: str): """保存状态到文件""" with open(filepath, 'w', encoding='utf-8') as f: f.write(self.to_json())
function_simple
1
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "save_to_file", "class_name": "State", "qualname": "State.save_to_file", "file_path": "MediaEngine/state/state.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"}
langflow-ai/langflow:src/backend/tests/unit/utils/test_mcp_cleanup.py:TestTryTerminateMcpProcess.test_terminates_mcp_proxy_process
# Context: from unittest.mock import AsyncMock, MagicMock, patch from langflow.utils.mcp_cleanup import ( _kill_mcp_processes, _terminate_child_mcp_processes, _terminate_orphaned_mcp_processes, _try_terminate_mcp_process, cleanup_mcp_sessions, ) class TestCleanupMcpSessions: ... class TestKillMcpProcesses: ... class TestTerminateChildMcpProcesses: ... class TestTerminateOrphanedMcpProcesses: ... class TestMcpCleanupIntegration: ... class TestTryTerminateMcpProcess: async def test_terminates_mcp_server_process(self): ... async def test_skips_non_mcp_process(self): ... async def test_kills_process_on_timeout(self): ... async def test_handles_no_such_process(self): ... async def test_handles_access_denied(self): ... async def test_handles_zombie_process(self): ... async def test_handles_empty_cmdline(self): ... async def test_handles_none_cmdline(self): ... # Task: Write a Python test method `test_terminates_mcp_proxy_process` in test class `TestTryTerminateMcpProcess` to test termination of mcp-proxy process. Module under test: langflow.utils.mcp_cleanup
async def test_terminates_mcp_proxy_process(self): """Test termination of mcp-proxy process.""" mock_psutil = MagicMock() mock_psutil.NoSuchProcess = Exception mock_psutil.AccessDenied = Exception mock_psutil.ZombieProcess = Exception mock_psutil.TimeoutExpired = Exception mock_proc = MagicMock() mock_proc.cmdline.return_value = ["mcp-proxy", "--port", "8080"] mock_proc.terminate = MagicMock() mock_proc.wait = MagicMock() result = await _try_terminate_mcp_process(mock_proc, mock_psutil) assert result is True mock_proc.terminate.assert_called_once()
test
1
{"function_name": "test_terminates_mcp_proxy_process", "class_name": "TestTryTerminateMcpProcess", "qualname": "TestTryTerminateMcpProcess.test_terminates_mcp_proxy_process", "file_path": "src/backend/tests/unit/utils/test_mcp_cleanup.py", "repo_id": "langflow-ai/langflow", "loc": 17, "tested_modules": ["langflow.utils.mcp_cleanup"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/anthropic/completion.py:AnthropicCompletion.call
# Context: import logging from typing import TYPE_CHECKING, Any, Final, Literal, TypeGuard, cast from pydantic import BaseModel from crewai.llms.base_llm import BaseLLM, llm_call_context from crewai.utilities.types import LLMMessage def _supports_native_structured_outputs(model: str) -> bool: ... def _is_pydantic_model_class(obj: Any) -> TypeGuard[type[BaseModel]]: ... def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool: ... class AnthropicThinkingConfig(BaseModel): ... class AnthropicCompletion(BaseLLM): def __init__( self, model: str = "claude-3-5-sonnet-20241022", api_key: str | None = None, base_url: str | None = None, timeout: float | None = None, max_retries: int = 2, temperature: float | None = None, max_tokens: int = 4096, # Required for Anthropic top_p: float | None = None, stop_sequences: list[str] | None = None, stream: bool = False, client_params: dict[str, Any] | None = None, interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None, thinking: AnthropicThinkingConfig | None = None, response_format: type[BaseModel] | None = None, **kwargs: Any, ): """Initialize Anthropic chat completion client. Args: model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022') api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var) base_url: Custom base URL for Anthropic API timeout: Request timeout in seconds max_retries: Maximum number of retries temperature: Sampling temperature (0-1) max_tokens: Maximum tokens in response (required for Anthropic) top_p: Nucleus sampling parameter stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop) stream: Enable streaming responses client_params: Additional parameters for the Anthropic client interceptor: HTTP interceptor for modifying requests/responses at transport level. response_format: Pydantic model for structured output. When provided, responses will be validated against this model schema. 
**kwargs: Additional parameters """ super().__init__( model=model, temperature=temperature, stop=stop_sequences or [], **kwargs ) # Client params self.interceptor = interceptor self.client_params = client_params self.base_url = base_url self.timeout = timeout self.max_retries = max_retries self.client = Anthropic(**self._get_client_params()) async_client_params = self._get_client_params() if self.interceptor: async_transport = AsyncHTTPTransport(interceptor=self.interceptor) async_http_client = httpx.AsyncClient(transport=async_transport) async_client_params["http_client"] = async_http_client self.async_client = AsyncAnthropic(**async_client_params) # Store completion parameters self.max_tokens = max_tokens self.top_p = top_p self.stream = stream self.stop_sequences = stop_sequences or [] self.thinking = thinking self.previous_thinking_blocks: list[ThinkingBlock] = [] self.response_format = response_format # Model-specific settings self.is_claude_3 = "claude-3" in model.lower() self.supports_tools = True def stop(self) -> list[str]: ... def stop(self, value: list[str] | str | None) -> None: ... def _get_client_params(self) -> dict[str, Any]: ... async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _prepare_completion_params(self, messages: list[LLMMessage], system_message: str | None, tools: list[dict[str, Any]] | None, available_functions: dict[str, Any] | None) -> dict[str, Any]: ... def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]: ... def _extract_thinking_block(self, content_block: Any) -> ThinkingBlock | dict[str, Any] | None: ... def _format_messages_for_anthropic(self, messages: str | list[LLMMessage]) -> tuple[list[LLMMessage], str | None]: ... 
def _handle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _handle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _execute_tools_and_collect_results(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> list[dict[str, Any]]: ... def _execute_first_tool(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> Any | None: ... def _handle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ... async def _ahandle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ... def supports_function_calling(self) -> bool: ... def supports_stop_words(self) -> bool: ... def get_context_window_size(self) -> int: ... def _extract_anthropic_token_usage(response: Message | BetaMessage) -> dict[str, Any]: ... 
def supports_multimodal(self) -> bool: ... def get_file_uploader(self) -> Any: ... # Task: Write a Python method `call` for the class `AnthropicCompletion` to call Anthropic messages API. Parameters: messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None Returns: str | Any
def call(
    self,
    messages: str | list[LLMMessage],
    tools: list[dict[str, Any]] | None = None,
    callbacks: list[Any] | None = None,
    available_functions: dict[str, Any] | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    response_model: type[BaseModel] | None = None,
) -> str | Any:
    """Invoke the Anthropic messages API and return the completion result.

    Args:
        messages: Prompt text or structured chat messages.
        tools: Optional tool/function definitions passed to the API.
        callbacks: Callback hooks (unused by the native implementation).
        available_functions: Callables the model may invoke via tool use.
        from_task: Originating task, if any (used for event telemetry).
        from_agent: Originating agent, if any (used for event telemetry).
        response_model: Optional structured-output model; when absent,
            ``self.response_format`` is used instead.

    Returns:
        The completion text, a structured response, or a tool call
        result, depending on configuration.

    Raises:
        ValueError: If a ``before_llm_call`` hook vetoes the call.
        Exception: Re-raises any underlying API failure after emitting
            a failure event.
    """
    with llm_call_context():
        try:
            # Telemetry: announce the call before any work happens.
            self._emit_call_started_event(
                messages=messages,
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
            )

            anthropic_messages, system_prompt = self._format_messages_for_anthropic(
                messages
            )

            # Hooks may veto the call entirely.
            if not self._invoke_before_llm_call_hooks(anthropic_messages, from_agent):
                raise ValueError("LLM call blocked by before_llm_call hook")

            request_params = self._prepare_completion_params(
                anthropic_messages, system_prompt, tools, available_functions
            )

            # An explicit response_model wins over the instance-level format.
            structured_model = response_model or self.response_format

            # Dispatch to the streaming or blocking handler.
            handler = (
                self._handle_streaming_completion
                if self.stream
                else self._handle_completion
            )
            return handler(
                request_params,
                available_functions,
                from_task,
                from_agent,
                structured_model,
            )
        except Exception as e:
            failure = f"Anthropic API call failed: {e!s}"
            logging.error(failure)
            self._emit_call_failed_event(
                error=failure, from_task=from_task, from_agent=from_agent
            )
            raise
function_complex
0
{"cognitive_complexity": 8, "loc": 77, "code_loc": 43, "docstring_loc": 13, "function_name": "call", "class_name": "AnthropicCompletion", "qualname": "AnthropicCompletion.call", "file_path": "lib/crewai/src/crewai/llms/providers/anthropic/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py:DLinear.forward
# Context: import torch class moving_avg(nn.Module): ... class series_decomp(nn.Module): ... class DLinear(nn.Module): def __init__(self, configs: Dict[str, Any]): super().__init__() self.seq_len: int = configs["seq_len"] self.pred_len: int = configs["pred_len"] self.decompsition = series_decomp(kernel_size=KERNEL_SIZE) self.individual: bool = configs["individual"] self.channels: int = configs["enc_in"] if self.individual: self.Linear_Seasonal = nn.ModuleList() self.Linear_Trend = nn.ModuleList() for _ in range(self.channels): self.Linear_Seasonal.append(nn.Linear(self.seq_len, self.pred_len)) self.Linear_Trend.append(nn.Linear(self.seq_len, self.pred_len)) else: self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len) self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len) # Task: Write a Python method `forward` for the class `DLinear` to forward pass for the DLinear model. Parameters: x: torch.Tensor Returns: torch.Tensor
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass for the DLinear model.

    Decomposes the input into seasonal and trend components and projects
    each along the time axis with (per-channel or shared) linear heads.

    Args:
        x (torch.Tensor): Input tensor. Can be 2D [Batch, SeqLen]
            (interpreted as 1 channel) or 3D [Batch, SeqLen, Channels].

    Returns:
        torch.Tensor: Output tensor of shape [Batch, PredLen, Channels].
    """
    # Accept 2D input by promoting it to a single-channel 3D tensor, so
    # behavior matches the documented contract above.
    if x.dim() == 2:
        x = x.unsqueeze(-1)

    # seasonal_init, trend_init shapes: [Batch, SeqLen, Channel].
    seasonal_init, trend_init = self.decompsition(x)
    # Permute to [Batch, Channel, SeqLen] so nn.Linear maps SeqLen -> PredLen.
    seasonal_init = seasonal_init.permute(0, 2, 1)
    trend_init = trend_init.permute(0, 2, 1)

    if self.individual:
        # One dedicated linear head per channel; allocate outputs directly
        # on the input's device (avoids an extra .to() transfer).
        seasonal_output = torch.zeros(
            [seasonal_init.size(0), seasonal_init.size(1), self.pred_len],
            dtype=seasonal_init.dtype,
            device=seasonal_init.device,
        )
        trend_output = torch.zeros(
            [trend_init.size(0), trend_init.size(1), self.pred_len],
            dtype=trend_init.dtype,
            device=trend_init.device,
        )
        for i in range(self.channels):
            seasonal_output[:, i, :] = self.Linear_Seasonal[i](
                seasonal_init[:, i, :]
            )
            trend_output[:, i, :] = self.Linear_Trend[i](trend_init[:, i, :])
    else:
        # Shared heads: Linear applies to the trailing (SeqLen) dim.
        seasonal_output = self.Linear_Seasonal(
            seasonal_init
        )  # Output: [Batch, Channel, PredLen].
        trend_output = self.Linear_Trend(
            trend_init
        )  # Output: [Batch, Channel, PredLen].

    # Recombine components; shape [Batch, Channel, PredLen].
    output_x = seasonal_output + trend_output
    return output_x.permute(0, 2, 1)  # Transform to [Batch, PredLen, Channel].
function_simple
0
{"cognitive_complexity": 4, "loc": 46, "code_loc": 26, "docstring_loc": 10, "function_name": "forward", "class_name": "DLinear", "qualname": "DLinear.forward", "file_path": "doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py:HyperbrowserLoadTool:class_doc
Write a class-level docstring for `HyperbrowserLoadTool` (inherits from BaseTool) which has methods: `__init__`, `_prepare_params`, `_extract_content`, `_run`.
Scrape or crawl web pages with Hyperbrowser and load their contents, with optional parameters for configuring content extraction. Requires the `hyperbrowser` package. Get your API key from https://app.hyperbrowser.ai/ Args: api_key: The Hyperbrowser API key; can be set via the `HYPERBROWSER_API_KEY` environment variable or passed directly
documentation
0
{"doc_type": "class", "class_name": "HyperbrowserLoadTool", "file_path": "lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py", "repo_id": "crewAIInc/crewAI", "char_length": 345, "methods": ["__init__", "_prepare_params", "_extract_content", "_run"]}
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py:TestComponentRegistry.test_register_and_get_direct_class
# Context: from ray.llm._internal.serve.utils.registry import ComponentRegistry, get_registry class TestComponentRegistry: def test_register_and_get_module_path(self): ... def test_get_nonexistent_component_raises(self): ... def test_invalid_string_format_raises(self): ... def test_double_registration_raises(self): ... def test_reregister_after_unregister(self): ... def test_get_registry_singleton(self): ... def test_get_registry_different_categories(self): ... def test_unregister(self): ... # Task: Write a Python test method `test_register_and_get_direct_class` in test class `TestComponentRegistry` to test registering and retrieving a class directly. Module under test: ray.llm._internal.serve.utils.registry
def test_register_and_get_direct_class(self):
    """A class registered directly (not as a module path) is retrievable."""
    reg = ComponentRegistry("test_category")
    direct_cls = type("TestClass", (), {})

    reg.register("test_component", direct_cls)

    assert reg.contains("test_component")
    assert reg.get("test_component") == direct_cls
test
0
{"function_name": "test_register_and_get_direct_class", "class_name": "TestComponentRegistry", "qualname": "TestComponentRegistry.test_register_and_get_direct_class", "file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py", "repo_id": "ray-project/ray", "loc": 9, "tested_modules": ["ray.llm._internal.serve.utils.registry"], "has_docstring": true, "runnable_level": "plib_runnable"}
scrapy/scrapy:tests/test_downloader_handlers_http_base.py:TestHttpBase.test_timeout_download_from_spider_nodata_rcvd
# Context: import sys import pytest from scrapy.exceptions import ( CannotResolveHostError, DownloadCancelledError, DownloadConnectionRefusedError, DownloadFailedError, DownloadTimeoutError, ResponseDataLossError, StopDownload, UnsupportedURLSchemeError, ) from scrapy.http import Headers, HtmlResponse, Request, Response, TextResponse from scrapy.utils.defer import deferred_from_coro, maybe_deferred_to_future from tests.mockserver.http import MockServer class TestHttp11Base(TestHttpBase): ... class TestHttps11Base(TestHttp11Base): ... class TestSimpleHttpsBase(ABC): ... class TestHttpsWrongHostnameBase(TestSimpleHttpsBase): ... class TestHttpsInvalidDNSIdBase(TestSimpleHttpsBase): ... class TestHttpsInvalidDNSPatternBase(TestSimpleHttpsBase): ... class TestHttpsCustomCiphersBase(TestSimpleHttpsBase): ... class TestHttpWithCrawlerBase(ABC): ... class TestHttpProxyBase(ABC): ... class TestHttpBase(ABC): is_secure = False def download_handler_cls(self) -> type[DownloadHandlerProtocol]: ... async def get_dh(self, settings_dict: dict[str, Any] | None) -> AsyncGenerator[DownloadHandlerProtocol]: ... async def test_unsupported_scheme(self) -> None: ... async def test_download(self, mockserver: MockServer) -> None: ... async def test_download_head(self, mockserver: MockServer) -> None: ... async def test_download_has_correct_http_status_code(self, mockserver: MockServer, http_status: HTTPStatus) -> None: ... async def test_server_receives_correct_request_headers(self, mockserver: MockServer) -> None: ... async def test_request_header_none(self, mockserver: MockServer) -> None: ... async def test_request_header_duplicate(self, mockserver: MockServer, request_headers: Any) -> None: ... async def test_server_receives_correct_request_body(self, mockserver: MockServer) -> None: ... async def test_download_has_correct_response_headers(self, mockserver: MockServer) -> None: ... async def test_redirect_status(self, mockserver: MockServer) -> None: ... 
async def test_redirect_status_head(self, mockserver: MockServer) -> None: ... async def test_timeout_download_from_spider_server_hangs(self, mockserver: MockServer, reactor_pytest: str) -> None: ... async def test_host_header(self, send_header: bool, mockserver: MockServer) -> None: ... async def test_content_length_zero_bodyless_post_request_headers(self, mockserver: MockServer) -> None: ... async def test_content_length_zero_bodyless_post_only_one(self, mockserver: MockServer) -> None: ... async def test_payload(self, mockserver: MockServer) -> None: ... async def test_response_header_content_length(self, mockserver: MockServer) -> None: ... async def test_response_class(self, filename: str, body: bytes, response_class: type[Response], mockserver: MockServer) -> None: ... async def test_get_duplicate_header(self, mockserver: MockServer) -> None: ... async def test_download_is_not_automatically_gzip_decoded(self, mockserver: MockServer) -> None: ... async def test_no_cookie_processing_or_persistence(self, mockserver: MockServer) -> None: ... # Task: Write a Python test method `test_timeout_download_from_spider_nodata_rcvd` in test class `TestHttpBase` to verify the behavior of `timeout_download_from_spider_nodata_rcvd`. Module under test: __future__, abc, contextlib
async def test_timeout_download_from_spider_nodata_rcvd(
    self, mockserver: MockServer, reactor_pytest: str
) -> None:
    """The download times out when the server accepts but never sends data."""
    on_windows_asyncio = reactor_pytest == "asyncio" and sys.platform == "win32"
    if on_windows_asyncio:
        # https://twistedmatrix.com/trac/ticket/10279
        pytest.skip(
            "This test produces DirtyReactorAggregateError on Windows with asyncio"
        )

    # The client connects, but the endpoint never sends any response data.
    req = Request(
        mockserver.url("/wait", is_secure=self.is_secure),
        meta={"download_timeout": 0.5},
    )
    async with self.get_dh() as handler:
        deferred = deferred_from_coro(handler.download_request(req))
        with pytest.raises(DownloadTimeoutError):
            await maybe_deferred_to_future(deferred)
test
1
{"function_name": "test_timeout_download_from_spider_nodata_rcvd", "class_name": "TestHttpBase", "qualname": "TestHttpBase.test_timeout_download_from_spider_nodata_rcvd", "file_path": "tests/test_downloader_handlers_http_base.py", "repo_id": "scrapy/scrapy", "loc": 16, "tested_modules": ["__future__", "abc", "contextlib", "http", "ipaddress"], "has_docstring": false, "runnable_level": "project_runnable"}
vllm-project/vllm:benchmarks/benchmark_topk_topp.py:print_summary_table
# Context: class BenchmarkConfig: ... def calculate_ops_pct(k_values: torch.Tensor | None, p_values: torch.Tensor | None, vocab_size: int, batch_size: int) -> float: ... def create_logits(batch_size: int, vocab_size: int, device: str) -> torch.Tensor: ... def measure_memory() -> tuple[int, int]: ... def reset_memory_stats(): ... def benchmark_function(func, logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, warmup_iters: int, benchmark_iters: int) -> tuple[float, int]: ... def create_benchmark_configs(batch_sizes: list[int], vocab_sizes: list[int], device: str) -> list[BenchmarkConfig]: ... def format_memory(bytes_val: int) -> str: ... def run_benchmark(configs: list[BenchmarkConfig], warmup_iters: int, benchmark_iters: int, verbose: bool): ... def main(): ... # Task: Write a Python function `print_summary_table` to print a summary table of results. Parameters: results: list[dict]
def print_summary_table(results: list[dict]):
    """Print a formatted summary table of benchmark results.

    Rows are grouped by vocabulary size, with a dashed separator printed
    between groups.

    Args:
        results: Benchmark result dicts, each carrying a ``config`` plus
            timing, speedup, and memory measurements.
    """
    rule = "=" * 130
    print()
    print(rule)
    print("SUMMARY TABLE")
    print(rule)
    print()

    # Column headings.
    print(
        f"{'Scenario':<40} {'Batch':>6} {'Vocab':>7} {'Ops%':>6} "
        f"{'Triton (ms)':>12} {'PyTorch (ms)':>13} {'Speedup':>8} "
        f"{'Tri Mem':>10} {'Pyt Mem':>10}"
    )
    print("-" * 130)

    last_vocab = None
    for entry in results:
        cfg = entry["config"]

        # Dashed separator whenever the vocab-size group changes.
        if last_vocab != cfg.vocab_size:
            if last_vocab is not None:
                print("-" * 130)
            last_vocab = cfg.vocab_size

        # Scenario name is everything before the batch-size suffix.
        scenario = cfg.name.split("_b")[0]
        print(
            f"{scenario:<40} {cfg.batch_size:>6} {cfg.vocab_size:>7} "
            f"{cfg.ops_pct:>5.0f}% "
            f"{entry['triton_time_ms']:>12.3f} {entry['pytorch_time_ms']:>13.3f} "
            f"{entry['speedup']:>7.2f}x "
            f"{format_memory(entry['triton_mem']):>10} "
            f"{format_memory(entry['pytorch_mem']):>10}"
        )

    print(rule)
function_complex
1
{"cognitive_complexity": 6, "loc": 39, "code_loc": 29, "docstring_loc": 1, "function_name": "print_summary_table", "class_name": null, "qualname": "print_summary_table", "file_path": "benchmarks/benchmark_topk_topp.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:tests/ci/test_action_loop_detection.py:test_detector_window_slides
# Context: from browser_use.agent.views import ( ActionLoopDetector, PageFingerprint, compute_action_hash, ) def _get_context_messages(agent: Agent) -> list[str]: ... def test_search_normalization_ignores_keyword_order(): ... def test_search_normalization_ignores_case(): ... def test_search_normalization_ignores_punctuation(): ... def test_search_normalization_deduplicates_tokens(): ... def test_search_different_queries_produce_different_hashes(): ... def test_click_same_index_same_hash(): ... def test_click_different_index_different_hash(): ... def test_input_same_element_same_text(): ... def test_input_different_text_different_hash(): ... def test_navigate_same_url_same_hash(): ... def test_navigate_different_paths_different_hash(): ... def test_navigate_different_domain_different_hash(): ... def test_scroll_direction_matters(): ... def test_scroll_different_elements_different_hash(): ... def test_scroll_same_element_same_hash(): ... def test_different_action_types_different_hashes(): ... def test_detector_no_nudge_for_diverse_actions(): ... def test_detector_nudge_at_5_repeats(): ... def test_detector_no_nudge_at_4_repeats(): ... def test_detector_nudge_escalates_at_8_repeats(): ... def test_detector_nudge_escalates_at_12_repeats(): ... def test_detector_critical_message_no_done_directive(): ... def test_detector_first_nudge_no_cannot_complete(): ... def test_detector_search_variations_detected_as_same(): ... def test_page_stagnation_no_nudge_when_pages_change(): ... def test_page_stagnation_nudge_at_5_identical_pages(): ... def test_page_stagnation_no_nudge_at_4_identical_pages(): ... def test_page_stagnation_resets_on_change(): ... def test_combined_loop_and_stagnation(): ... def test_page_fingerprint_same_content_equal(): ... def test_page_fingerprint_different_content_not_equal(): ... def test_page_fingerprint_different_url_not_equal(): ... def test_page_fingerprint_different_element_count_not_equal(): ... 
async def test_loop_nudge_injected_into_context(): ... async def test_no_loop_nudge_when_disabled(): ... async def test_no_loop_nudge_for_diverse_actions(): ... async def test_loop_detector_initialized_from_settings(): ... async def test_loop_detector_default_window_size(): ... # Task: Write a Python test function `test_detector_window_slides` to old actions fall out of the window. Module under test: browser_use.agent.service, browser_use.agent.views, browser_use.llm.messages
def test_detector_window_slides():
    """Old actions fall out of the window."""
    detector = ActionLoopDetector(window_size=10)

    # Saturate the window with the same click action.
    for _ in range(5):
        detector.record_action('click', {'index': 7})
    assert detector.max_repetition_count == 5

    # Ten distinct clicks evict the repeated ones from the window.
    for offset in range(10):
        detector.record_action('click', {'index': 100 + offset})

    assert detector.max_repetition_count < 5
    assert detector.get_nudge_message() is None
test
0
{"function_name": "test_detector_window_slides", "class_name": null, "qualname": "test_detector_window_slides", "file_path": "tests/ci/test_action_loop_detection.py", "repo_id": "browser-use/browser-use", "loc": 14, "tested_modules": ["browser_use.agent.service", "browser_use.agent.views", "browser_use.llm.messages", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"}
docling-project/docling:docling/models/inference_engines/vlm/api_openai_compatible_engine.py:ApiVlmEngine.cleanup
# Context: class ApiVlmEngine(BaseVlmEngine): def __init__( self, enable_remote_services: bool, options: ApiVlmEngineOptions, model_config: Optional["EngineModelConfig"] = None, ): """Initialize the API engine. Args: options: API-specific runtime options model_config: Model configuration (repo_id, revision, extra_config) """ super().__init__(options, model_config=model_config) self.enable_remote_services = enable_remote_services self.options: ApiVlmEngineOptions = options if not self.enable_remote_services: raise OperationNotAllowed( "Connections to remote services is only allowed when set explicitly. " "pipeline_options.enable_remote_services=True." ) # Merge model_config extra_config (which contains API params from model spec) # with runtime options params. Runtime options take precedence. if model_config and "api_params" in model_config.extra_config: # Model spec provides API params (e.g., model name) model_api_params = model_config.extra_config["api_params"] # Only use model spec params if user hasn't provided any params # This prevents conflicts when users provide custom params (e.g., model_id for watsonx) if not self.options.params: self.merged_params = model_api_params.copy() else: # User provided params - use them as-is (don't merge with model spec) self.merged_params = self.options.params.copy() else: self.merged_params = self.options.params.copy() def initialize(self) -> None: ... def predict_batch(self, input_batch: List[VlmEngineInput]) -> List[VlmEngineOutput]: ... # Task: Write a Python method `cleanup` for the class `ApiVlmEngine` to clean up API runtime resources. Returns: None
def cleanup(self) -> None:
    """Release any runtime resources held by the engine.

    The remote API runtime keeps no local resources, so this only logs
    the cleanup for observability.
    """
    _log.info("API runtime cleaned up")
function_simple
1
{"cognitive_complexity": 0, "loc": 6, "code_loc": 1, "docstring_loc": 4, "function_name": "cleanup", "class_name": "ApiVlmEngine", "qualname": "ApiVlmEngine.cleanup", "file_path": "docling/models/inference_engines/vlm/api_openai_compatible_engine.py", "repo_id": "docling-project/docling", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai-files/tests/test_file_url.py:TestFileUrl.test_invalid_url_scheme_raises
# Context: from crewai_files import FileBytes, FileUrl, ImageFile import pytest class TestNormalizeSource: ... class TestResolverUrlHandling: ... class TestImageFileWithUrl: ... class TestFileUrl: def test_create_file_url(self): ... def test_create_file_url_with_filename(self): ... def test_invalid_url_scheme_file_raises(self): ... def test_http_url_valid(self): ... def test_https_url_valid(self): ... def test_content_type_guessing_png(self): ... def test_content_type_guessing_jpeg(self): ... def test_content_type_guessing_pdf(self): ... def test_content_type_guessing_with_query_params(self): ... def test_content_type_fallback_unknown(self): ... def test_content_type_no_extension(self): ... def test_read_fetches_content(self): ... def test_read_caches_content(self): ... def test_read_updates_content_type_from_response(self): ... async def test_aread_fetches_content(self): ... async def test_aread_caches_content(self): ... # Task: Write a Python test method `test_invalid_url_scheme_raises` in test class `TestFileUrl` to test that non-http(s) URLs raise ValueError. Module under test: crewai_files, crewai_files.core.resolved, crewai_files.core.sources
def test_invalid_url_scheme_raises(self):
    """Non-http(s) URL schemes are rejected with a ValueError."""
    ftp_url = "ftp://example.com/file.txt"
    with pytest.raises(ValueError, match="Invalid URL scheme"):
        FileUrl(url=ftp_url)
test
0
{"function_name": "test_invalid_url_scheme_raises", "class_name": "TestFileUrl", "qualname": "TestFileUrl.test_invalid_url_scheme_raises", "file_path": "lib/crewai-files/tests/test_file_url.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["crewai_files", "crewai_files.core.resolved", "crewai_files.core.sources", "crewai_files.resolution.resolver"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/tests/test_tpu.py:test_get_current_node_labels_env_only
# Context: from ray._private.accelerators import TPUAcceleratorManager, tpu def test_get_current_pod_name_smoke(): ... def test_empty_get_current_pod_name_returns_none(): ... def test_worker_count(mock_glob, test_case): ... def test_num_tpu_chips(mock_glob): ... def test_is_valid_tpu_accelerator_topology(_mock_glob, test_case): ... def test_get_current_node_tpu_topology_from_metadata(): ... def test_infer_tpu_pod_type_from_topology(topology, accelerator_type, expected_pod_type, should_raise): ... def ray_start_cpu(): ... def ray_tpu_cluster(ray_start_cluster): ... def test_fetch_tpu_slice_name_from_pg(ray_tpu_cluster): ... def test_reserve_tpu_slice(ray_tpu_cluster): ... def test_slice_placement_group(ray_tpu_cluster): ... def test_multi_slice_placement_group(ray_tpu_cluster): ... def test_slice_placement_group_partial_failure_cleanup(mock_reserve, mock_remove_pg, mock_create_pg): ... def test_get_tpu_version_valid(accelerator_type, expected_version): ... def test_get_tpu_version_invalid(invalid_type): ... def test_get_tpu_num_slices_for_workers(topology, accelerator_type, num_workers, resources_per_worker, expected_slices): ... # Task: Write a Python test function `test_get_current_node_labels_env_only` to verify the behavior of `get_current_node_labels_env_only`. Module under test: ray._private.accelerators, ray.util.tpu
def test_get_current_node_labels_env_only(monkeypatch):
    """TPU node labels are derived purely from GKE environment variables."""
    # Simulate the environment a GKE TPU pod exposes.
    fake_env = {
        "TPU_NAME": "tpu-worker-group-2",
        "TPU_WORKER_ID": "0",
        "TPU_ACCELERATOR_TYPE": "v6e-16",
        "TPU_TOPOLOGY": "4x4",
    }
    for key, value in fake_env.items():
        monkeypatch.setenv(key, value)

    labels = TPUAcceleratorManager.get_current_node_accelerator_labels()

    assert labels["ray.io/tpu-slice-name"] == "tpu-worker-group-2"
    assert labels["ray.io/tpu-worker-id"] == "0"
    assert labels["ray.io/tpu-topology"] == "4x4"
    assert labels["ray.io/tpu-pod-type"] == "v6e-16"
test
0
{"function_name": "test_get_current_node_labels_env_only", "class_name": null, "qualname": "test_get_current_node_labels_env_only", "file_path": "python/ray/tests/test_tpu.py", "repo_id": "ray-project/ray", "loc": 13, "tested_modules": ["ray._private.accelerators", "ray.util.tpu"], "has_docstring": false, "runnable_level": "plib_runnable"}
huggingface/transformers:tests/models/ernie4_5_vl_moe/test_modeling_ernie4_5_vl_moe.py:Ernie4_5_VLMoeIntegrationTest.test_small_model_integration_test_batch_wo_image
# Context: from transformers.testing_utils import ( Expectations, cleanup, require_deterministic_for_xpu, require_torch, require_torch_large_accelerator, slow, torch_device, ) import torch class Ernie4_5_VLMoeVisionText2TextModelTester: ... class Ernie4_5_VLMoeModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): ... class Ernie4_5_VLMoeSmallIntegrationTest(unittest.TestCase): ... class Ernie4_5_VLMoeIntegrationTest(unittest.TestCase): model = None model_id = "baidu/ERNIE-4.5-VL-28B-A3B-PT" def setUp(self): ... def tearDown(self): ... def load_model(self, dtype, attn_implementation): ... def test_small_model_integration_test(self): ... def test_small_model_integration_test_batch(self): ... def test_small_model_integration_test_with_video(self): ... def test_small_model_integration_test_expand(self): ... def test_small_model_integration_test_batch_different_resolutions(self): ... # Task: Write a Python test method `test_small_model_integration_test_batch_wo_image` in test class `Ernie4_5_VLMoeIntegrationTest` to verify the behavior of `small_model_integration_test_batch_wo_image`. Module under test: transformers, transformers.testing_utils, transformers.utils
def test_small_model_integration_test_batch_wo_image(self):
    model = self.load_model("auto")

    text_only_message = [
        {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
    ]
    inputs = self.processor.apply_chat_template(
        [self.message, text_only_message],
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
        padding=True,
    ).to(torch_device)

    # This model on the hub has `do_sample=True`.
    torch.manual_seed(42)

    # it should not matter whether two images are the same size or not
    output = model.generate(**inputs, max_new_tokens=30)

    EXPECTED_DECODED_TEXT = [
        "The animal in the image is a lynx. It's a medium-sized wild cat characterized by its distinctive facial ruff, short tail",
        "I am an AI assistant designed to help answer questions, provide information, and assist with tasks. I don't have personal experiences or a physical form"
    ]  # fmt: skip

    decoded = [
        self.processor.decode(
            output[idx][len(inputs["input_ids"][idx]):], skip_special_tokens=True
        )
        for idx in range(2)
    ]
    self.assertEqual(decoded, EXPECTED_DECODED_TEXT)
test
0
{"function_name": "test_small_model_integration_test_batch_wo_image", "class_name": "Ernie4_5_VLMoeIntegrationTest", "qualname": "Ernie4_5_VLMoeIntegrationTest.test_small_model_integration_test_batch_wo_image", "file_path": "tests/models/ernie4_5_vl_moe/test_modeling_ernie4_5_vl_moe.py", "repo_id": "huggingface/transformers", "loc": 33, "tested_modules": ["transformers", "transformers.testing_utils", "transformers.utils", "generation.test_utils", "test_configuration_common"], "has_docstring": false, "runnable_level": "class_runnable"}
huggingface/diffusers:src/diffusers/pipelines/ltx/pipeline_ltx_i2v_long_multi_prompt.py:linear_overlap_fuse
# Context: import torch def get_latent_coords(latent_num_frames, latent_height, latent_width, batch_size, device, rope_interpolation_scale, latent_idx): ... def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale): ... def adain_normalize_latents(curr_latents: torch.Tensor, ref_latents: torch.Tensor | None, factor: float) -> torch.Tensor: ... def split_into_temporal_windows(latent_len: int, temporal_tile_size: int, temporal_overlap: int, compression: int) -> list[tuple[int, int]]: ... def inject_prev_tail_latents(window_latents: torch.Tensor, prev_tail_latents: torch.Tensor | None, window_cond_mask_5d: torch.Tensor, overlap_lat: int, strength: float | None, prev_overlap_len: int) -> tuple[torch.Tensor, torch.Tensor, int]: ... def build_video_coords_for_window(latents: torch.Tensor, overlap_len: int, guiding_len: int, negative_len: int, rope_interpolation_scale: torch.Tensor, frame_rate: int) -> torch.Tensor: ... def parse_prompt_segments(prompt: str | list[str], prompt_segments: list[dict[str, Any]] | None) -> list[str]: ... def batch_normalize(latents, reference, factor): ... class LTXI2VLongMultiPromptPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin): ... # Task: Write a Python function `linear_overlap_fuse` to temporal linear crossfade between two latent clips over the overlap region. Parameters: prev: torch.Tensor, new: torch.Tensor, overlap: int Returns: torch.Tensor
def linear_overlap_fuse(prev: torch.Tensor, new: torch.Tensor, overlap: int) -> torch.Tensor:
    """Fuse two latent clips along time with a linear crossfade at the seam.

    Args:
        prev: Tensor [B, C, F, H, W]; the segment already produced.
        new: Tensor [B, C, F, H, W]; the segment being appended.
        overlap: Number of frames blended at the seam; values <= 1 simply
            concatenate the clips without blending.

    Returns:
        Tensor [B, C, F_prev + F_new - overlap, H, W] whose overlap region
        is linearly interpolated between the two clips.
    """
    if overlap <= 1:
        return torch.cat([prev, new], dim=2)

    # Interior points of a 1 -> 0 ramp over the overlap (endpoints dropped),
    # shaped to broadcast over every non-temporal axis.
    weights = torch.linspace(1, 0, overlap + 2, device=prev.device, dtype=prev.dtype)[1:-1]
    broadcast_shape = [1] * prev.ndim
    broadcast_shape[2] = weights.numel()
    weights = weights.view(broadcast_shape)

    tail = prev[:, :, -overlap:]
    head = new[:, :, :overlap]
    seam = weights * tail + (1 - weights) * head

    return torch.cat([prev[:, :, :-overlap], seam, new[:, :, overlap:]], dim=2)
function_simple
1
{"cognitive_complexity": 1, "loc": 20, "code_loc": 8, "docstring_loc": 11, "function_name": "linear_overlap_fuse", "class_name": null, "qualname": "linear_overlap_fuse", "file_path": "src/diffusers/pipelines/ltx/pipeline_ltx_i2v_long_multi_prompt.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "plib_runnable"}
crewAIInc/crewAI:lib/crewai/tests/telemetry/test_execution_span_assignment.py:test_end_crew_receives_valid_execution_span
# Context: import pytest from crewai import Agent, Crew, Task def cleanup_singletons(): ... def test_crew_execution_span_assigned_on_kickoff(): ... def test_crew_execution_span_not_set_when_share_crew_false(): ... async def test_crew_execution_span_assigned_on_kickoff_async(): ... def test_crew_execution_span_assigned_on_kickoff_for_each(): ... # Task: Write a Python test function `test_end_crew_receives_valid_execution_span` to test that end_crew receives a valid execution span to close. Module under test: crewai, crewai.events.event_bus, crewai.events.event_listener
def test_end_crew_receives_valid_execution_span():
    """Test that end_crew receives a valid execution span to close.

    Covers the full lifecycle — span creation, assignment, and closure —
    ensuring end_crew() can access crew._execution_span without errors.
    """
    greeter = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    crew = Crew(
        agents=[greeter],
        tasks=[
            Task(
                description="Say hello",
                expected_output="hello",
                agent=greeter,
            )
        ],
        share_crew=True,
    )

    result = crew.kickoff()

    assert crew._execution_span is not None
    assert result is not None
test
0
{"function_name": "test_end_crew_receives_valid_execution_span", "class_name": null, "qualname": "test_end_crew_receives_valid_execution_span", "file_path": "lib/crewai/tests/telemetry/test_execution_span_assignment.py", "repo_id": "crewAIInc/crewAI", "loc": 27, "tested_modules": ["crewai", "crewai.events.event_bus", "crewai.events.event_listener", "crewai.telemetry"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/components/models_and_agents/test_conversation_context_ordering.py:TestALTKAgentContextOrdering.test_conversation_context_empty_history
# Context: from lfx.base.agents.altk_base_agent import ALTKBaseAgentComponent from lfx.schema.message import Message class TestALTKAgentContextOrdering: def test_conversation_context_chronological_order(self): ... def test_conversation_context_no_current_input(self): ... def test_conversation_context_single_turn(self): ... def test_conversation_context_multi_turn_regression(self): ... # Task: Write a Python test method `test_conversation_context_empty_history` in test class `TestALTKAgentContextOrdering` to test conversation context with empty chat history. Module under test: langchain_core.messages, lfx.base.agents.altk_base_agent, lfx.schema.message
def test_conversation_context_empty_history(self): """Test conversation context with empty chat history.""" current_input = Message(text="hello", sender="User", sender_name="User") class TestAgent(ALTKBaseAgentComponent): def __init__(self): self.input_value = current_input self.chat_history = [] agent = TestAgent() context = agent.build_conversation_context() # Should only contain current input assert len(context) == 1 assert context[0].content == "hello"
test
1
{"function_name": "test_conversation_context_empty_history", "class_name": "TestALTKAgentContextOrdering", "qualname": "TestALTKAgentContextOrdering.test_conversation_context_empty_history", "file_path": "src/backend/tests/unit/components/models_and_agents/test_conversation_context_ordering.py", "repo_id": "langflow-ai/langflow", "loc": 15, "tested_modules": ["langchain_core.messages", "lfx.base.agents.altk_base_agent", "lfx.schema.message"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/diffusers:src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py:QwenImageEditModularPipeline:class_doc
Write a class-level docstring for `QwenImageEditModularPipeline` (inherits from ModularPipeline, QwenImageLoraLoaderMixin) which has methods: `default_height`, `default_width`, `default_sample_size`, `vae_scale_factor`, `num_channels_latents`.
A ModularPipeline for QwenImage-Edit. > [!WARNING] > This is an experimental feature and is likely to change in the future.
documentation
1
{"doc_type": "class", "class_name": "QwenImageEditModularPipeline", "file_path": "src/diffusers/modular_pipelines/qwenimage/modular_pipeline.py", "repo_id": "huggingface/diffusers", "char_length": 124, "methods": ["default_height", "default_width", "default_sample_size", "vae_scale_factor", "num_channels_latents", "is_guidance_distilled", "requires_unconditional_embeds"]}
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py:S3ParquetDataLoader.create_dataset
# Context: import io from typing import Dict, List, Optional import ray from PIL import Image class BenchmarkConfig: ... class BaseDataLoader(ABC): ... class S3UrlImageDataLoader(BaseDataLoader): ... class S3ReadImagesDataLoader(BaseDataLoader): ... def create_data_loader(data_loader: str, split: str) -> BaseDataLoader: ... def benchmark_iteration(dataset: ray.data.Dataset, batch_size: int, prefetch_batches: int, num_batches: int, simulated_training_time: float, device: str, pin_memory: bool) -> Dict[str, float]: ... def run_benchmark(config: BenchmarkConfig) -> List[Dict]: ... def print_summary(results: List[Dict]): ... def main(): ... class S3ParquetDataLoader(BaseDataLoader): S3_ROOT = "s3://ray-benchmark-data-internal-us-west-2/imagenet/parquet_split" SPLIT_DIRS = BaseDataLoader.make_split_dirs(S3_ROOT) def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None): """Initialize the data loader with base dataset cache.""" super().__init__(data_dir, label_to_id_map) self._base_dataset_cache: Optional[ray.data.Dataset] = None def get_data_dir(cls, split: str) -> str: ... def get_base_dataset(self) -> ray.data.Dataset: ... # Task: Write a Python method `create_dataset` for the class `S3ParquetDataLoader` to create dataset by applying map to the cached base dataset. Parameters: transform_type: str, batch_size: int, num_batches: int, num_image_columns: int Returns: ray.data.Dataset
def create_dataset( self, transform_type: str, batch_size: int, num_batches: int, num_image_columns: int, ) -> ray.data.Dataset: """Create dataset by applying map to the cached base dataset.""" limit = self.compute_limit(batch_size, num_batches) transform = self.get_transform(transform_type) # Capture instance variables for closure label_to_id_map = self.label_to_id_map def process_row(row: Dict) -> Dict: image_pil = Image.open(io.BytesIO(row["image"])).convert("RGB") processed = BaseDataLoader.tensor_to_numpy(transform(image_pil)) BaseDataLoader.add_image_columns(row, processed, num_image_columns) row["label"] = label_to_id_map.get(row["label"], -1) return row return self.get_base_dataset().limit(limit).map(process_row)
function_simple
0
{"cognitive_complexity": 0, "loc": 22, "code_loc": 10, "docstring_loc": 1, "function_name": "create_dataset", "class_name": "S3ParquetDataLoader", "qualname": "S3ParquetDataLoader.create_dataset", "file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/core/types.py:BaseFile.filename
# Context: class _FileSourceCoercer: ... class ImageFile(BaseFile): ... class PDFFile(BaseFile): ... class TextFile(BaseFile): ... class AudioFile(BaseFile): ... class VideoFile(BaseFile): ... class File(BaseFile): ... class BaseFile(ABC, BaseModel): def _file_source(self) -> FileSource: ... def content_type(self) -> str: ... def read(self) -> bytes: ... async def aread(self) -> bytes: ... def read_text(self, encoding: str) -> str: ... def _unpack_key(self) -> str: ... def keys(self) -> list[str]: ... def __getitem__(self, key: str) -> Self: ... # Task: Write a Python method `filename` for the class `BaseFile` to get the filename from the source. Returns: str | None
def filename(self) -> str | None: """Get the filename from the source.""" return self._file_source.filename
function_simple
0
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "filename", "class_name": "BaseFile", "qualname": "BaseFile.filename", "file_path": "lib/crewai-files/src/crewai_files/core/types.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnable"}
zhayujie/chatgpt-on-wechat:agent/skills/frontmatter.py:parse_frontmatter
# Context: import re import json from typing import Dict, Any, Optional, List import yaml def parse_metadata(frontmatter: Dict[str, Any]) -> Optional[SkillMetadata]: ... def _normalize_string_list(value: Any) -> List[str]: ... def parse_boolean_value(value: Optional[str], default: bool) -> bool: ... def get_frontmatter_value(frontmatter: Dict[str, Any], key: str) -> Optional[str]: ... # Task: Write a Python function `parse_frontmatter` to parse YAML-style frontmatter from markdown content. Parameters: content: str Returns: Dict[str, Any]
def parse_frontmatter(content: str) -> Dict[str, Any]: """ Parse YAML-style frontmatter from markdown content. Returns a dictionary of frontmatter fields. """ frontmatter = {} # Match frontmatter block between --- markers match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL) if not match: return frontmatter frontmatter_text = match.group(1) # Try to use PyYAML for proper YAML parsing try: import yaml frontmatter = yaml.safe_load(frontmatter_text) if not isinstance(frontmatter, dict): frontmatter = {} return frontmatter except ImportError: # Fallback to simple parsing if PyYAML not available pass except Exception: # If YAML parsing fails, fall back to simple parsing pass # Simple YAML-like parsing (supports key: value format only) # This is a fallback for when PyYAML is not available for line in frontmatter_text.split('\n'): line = line.strip() if not line or line.startswith('#'): continue if ':' in line: key, value = line.split(':', 1) key = key.strip() value = value.strip() # Try to parse as JSON if it looks like JSON if value.startswith('{') or value.startswith('['): try: value = json.loads(value) except json.JSONDecodeError: pass # Parse boolean values elif value.lower() in ('true', 'false'): value = value.lower() == 'true' # Parse numbers elif value.isdigit(): value = int(value) frontmatter[key] = value return frontmatter
function_complex
1
{"cognitive_complexity": 18, "loc": 57, "code_loc": 34, "docstring_loc": 5, "function_name": "parse_frontmatter", "class_name": null, "qualname": "parse_frontmatter", "file_path": "agent/skills/frontmatter.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "plib_runnable"}
unclecode/crawl4ai:tests/adaptive/test_embedding_strategy.py:module_doc
Write a module-level docstring for the Python module `test_embedding_strategy` which contains various utilities.
Test and demo script for Embedding-based Adaptive Crawler This script demonstrates the embedding-based adaptive crawling with semantic space coverage and gap-driven expansion.
documentation
1
{"doc_type": "module", "module_name": "test_embedding_strategy", "file_path": "tests/adaptive/test_embedding_strategy.py", "repo_id": "unclecode/crawl4ai", "char_length": 176}
run-llama/llama_index:llama-index-instrumentation/src/llama_index_instrumentation/events/span.py:SpanDropEvent:class_doc
Write a class-level docstring for `SpanDropEvent` (inherits from BaseEvent) which has methods: `class_name`.
SpanDropEvent. Args: err_str (str): Error string.
documentation
1
{"doc_type": "class", "class_name": "SpanDropEvent", "file_path": "llama-index-instrumentation/src/llama_index_instrumentation/events/span.py", "repo_id": "run-llama/llama_index", "char_length": 54, "methods": ["class_name"]}
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_dialog_edge_cases.py:TestDialogEdgeCases.test_concurrent_dialog_operations
# Context: import pytest from common import create_dialog, delete_dialog, get_dialog, update_dialog from concurrent.futures import ThreadPoolExecutor, as_completed class TestDialogEdgeCases: def test_create_dialog_with_tavily_api_key(self, WebApiAuth): ... def test_create_dialog_with_different_embedding_models(self, WebApiAuth): ... def test_create_dialog_with_extremely_long_system_prompt(self, WebApiAuth): ... def test_create_dialog_with_unicode_characters(self, WebApiAuth): ... def test_create_dialog_with_extreme_parameter_values(self, WebApiAuth): ... def test_create_dialog_with_negative_parameter_values(self, WebApiAuth): ... def test_update_dialog_with_empty_kb_ids(self, WebApiAuth, add_dialog_func): ... def test_update_dialog_with_null_values(self, WebApiAuth, add_dialog_func): ... def test_dialog_with_complex_prompt_parameters(self, WebApiAuth, add_dataset_func): ... def test_dialog_with_malformed_prompt_parameters(self, WebApiAuth): ... def test_dialog_operations_with_special_ids(self, WebApiAuth): ... def test_dialog_with_extremely_large_llm_settings(self, WebApiAuth): ... # Task: Write a Python test method `test_concurrent_dialog_operations` in test class `TestDialogEdgeCases` to test concurrent operations on the same dialog. Module under test: common, concurrent.futures
def test_concurrent_dialog_operations(self, WebApiAuth, add_dialog_func): """Test concurrent operations on the same dialog""" from concurrent.futures import ThreadPoolExecutor, as_completed _, dialog_id = add_dialog_func def update_operation(i): payload = {"dialog_id": dialog_id, "name": f"concurrent_update_{i}", "prompt_config": {"system": f"You are assistant number {i}.", "parameters": []}} return update_dialog(WebApiAuth, payload) with ThreadPoolExecutor(max_workers=5) as executor: futures = [executor.submit(update_operation, i) for i in range(10)] responses = [future.result() for future in as_completed(futures)] successful_updates = sum(1 for response in responses if response["code"] == 0) assert successful_updates > 0, "No updates succeeded" res = get_dialog(WebApiAuth, {"dialog_id": dialog_id}) assert res["code"] == 0, res
test
1
{"function_name": "test_concurrent_dialog_operations", "class_name": "TestDialogEdgeCases", "qualname": "TestDialogEdgeCases.test_concurrent_dialog_operations", "file_path": "test/testcases/test_web_api/test_dialog_app/test_dialog_edge_cases.py", "repo_id": "infiniflow/ragflow", "loc": 20, "tested_modules": ["common", "concurrent.futures"], "has_docstring": true, "runnable_level": "project_runnable"}
mem0ai/mem0:mem0/configs/llms/openai.py:OpenAIConfig:class_doc
Write a class-level docstring for `OpenAIConfig` (inherits from BaseLlmConfig) which has methods: `__init__`.
Configuration class for OpenAI and OpenRouter-specific parameters. Inherits from BaseLlmConfig and adds OpenAI-specific settings.
documentation
1
{"doc_type": "class", "class_name": "OpenAIConfig", "file_path": "mem0/configs/llms/openai.py", "repo_id": "mem0ai/mem0", "char_length": 129, "methods": ["__init__"]}
run-llama/llama_index:llama-index-core/tests/test_rate_limiter.py:test_embedding_without_rate_limiter_works
# Context: from llama_index.core.embeddings.mock_embed_model import MockEmbedding def test_base_rate_limiter_is_abstract() -> None: ... def test_token_bucket_is_subclass_of_base() -> None: ... def test_rate_limiter_alias_is_token_bucket() -> None: ... def test_instance_is_base_rate_limiter() -> None: ... def test_custom_rate_limiter_subclass() -> None: ... def test_creation_rpm_only() -> None: ... def test_creation_tpm_only() -> None: ... def test_creation_both() -> None: ... def test_validation_rejects_zero() -> None: ... def test_validation_rejects_negative() -> None: ... def test_burst_within_limit() -> None: ... def test_acquire_blocks_when_exhausted() -> None: ... def test_refill_caps_at_max() -> None: ... async def test_async_acquire_burst_within_limit() -> None: ... async def test_async_acquire_tpm_limiting() -> None: ... async def test_concurrent_async_rate_limiting() -> None: ... def test_llm_sync_chat_calls_acquire() -> None: ... async def test_llm_async_chat_calls_async_acquire() -> None: ... def test_llm_sync_complete_calls_acquire() -> None: ... async def test_llm_async_complete_calls_async_acquire() -> None: ... def test_llm_without_rate_limiter_works() -> None: ... def test_embedding_single_calls_acquire() -> None: ... def test_embedding_batch_calls_acquire_per_batch() -> None: ... def test_shared_rate_limiter_across_instances() -> None: ... def test_shared_limiter_between_llm_and_embedding() -> None: ... # Task: Write a Python test function `test_embedding_without_rate_limiter_works` to verify the behavior of `embedding_without_rate_limiter_works`. Module under test: llama_index.core.base.llms.types, llama_index.core.llms.mock, llama_index.core.rate_limiter
def test_embedding_without_rate_limiter_works() -> None: from llama_index.core.embeddings.mock_embed_model import MockEmbedding embed = MockEmbedding(embed_dim=8) assert embed.rate_limiter is None result = embed.get_text_embedding("test") assert len(result) == 8
test
1
{"function_name": "test_embedding_without_rate_limiter_works", "class_name": null, "qualname": "test_embedding_without_rate_limiter_works", "file_path": "llama-index-core/tests/test_rate_limiter.py", "repo_id": "run-llama/llama_index", "loc": 7, "tested_modules": ["llama_index.core.base.llms.types", "llama_index.core.llms.mock", "llama_index.core.rate_limiter", "llama_index.core.embeddings.mock_embed_model", "llama_index.core.embeddings.mock_embed_model"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py:Ernie4_5_VLMoeModel.get_rope_index
# Context: import itertools import torch class Ernie4_5_VLMoeVisionConfig(Qwen2VLVisionConfig): ... class Ernie4_5_VLMoeTextConfig(Ernie4_5_MoeConfig, PreTrainedConfig): ... class Ernie4_5_VLMoeConfig(PreTrainedConfig): ... class Ernie4_5_VLMoeTextRotaryEmbedding(nn.Module): ... def rotate_half_text(x): ... def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim): ... class Ernie4_5_VLMoeTextAttention(Ernie4_5_MoeAttention): ... class Ernie4_5_VLMoeRMSNorm(Ernie4_5_MoeRMSNorm): ... class Ernie4_5_VLMoeMLP(Ernie4_5_MoeMLP): ... class Ernie4_5_VLMoeMoeStatics(Ernie4_5_MoeStatics): ... class Ernie4_5_VLMoeMoeTopKRouter(Ernie4_5_MoeTopKRouter): ... class Ernie4_5_VLMoeMoeExperts(Ernie4_5_MoeExperts): ... class Ernie4_5_VLMoeSparseMoeBlock(nn.Module): ... class Ernie4_5_VLMoeMoeBlock(nn.Module): ... class Ernie4_5_VLMoeDecoderLayer(GradientCheckpointingLayer): ... class Ernie4_5_VLMoeVisionAttention(Qwen2_5_VLVisionAttention): ... class Ernie4_5_VLMoeVisionBlock(Qwen2_5_VLVisionBlock): ... class Ernie4_5_VLMoePreTrainedModel(Qwen2_5_VLPreTrainedModel): ... class Ernie4_5_VLMoeTextModel(Ernie4_5_MoeModel): ... class Ernie4_5VLVisionMLP(VisionMlp): ... class Ernie4_5_VLMoePatchEmbed(Qwen2_5_VisionPatchEmbed): ... class Ernie4_5_VLMoeVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding): ... class Ernie4_5_VLMoeVisionTransformerPretrainedModel(Qwen2VisionTransformerPretrainedModel): ... class Ernie4_5_VLMoeVisionMLP(nn.Module): ... class Ernie4_5_VLMoeVariableResolutionResamplerModel(nn.Module): ... class Ernie4_5_VLMoeForConditionalGeneration(Glm4vForConditionalGeneration, GenerationMixin): ... class Ernie4_5_VLMoeImageProcessorKwargs(Glm4vImageProcessorKwargs): ... class Ernie4_5_VLMoeImageProcessor(Glm4vImageProcessor): ... class Ernie4_5_VLMoeImageProcessorFast(Glm4vImageProcessorFast): ... class Ernie4_5_VL_MoeForConditionalGeneration(Ernie4_5_VLMoeForConditionalGeneration): ... class Ernie4_5_VL_MoeConfig(Ernie4_5_VLMoeConfig): ... 
class Ernie4_5_VL_MoeTextConfig(Ernie4_5_VLMoeTextConfig): ... class Ernie4_5_VL_MoeVisionConfig(Ernie4_5_VLMoeVisionConfig): ... class Ernie4_5_VL_MoePreTrainedModel(Ernie4_5_VLMoePreTrainedModel): ... class Ernie4_5_VL_MoeModel(Ernie4_5_VLMoeModel): ... class Ernie4_5_VL_MoeTextModel(Ernie4_5_VLMoeTextModel): ... class Ernie4_5_VL_MoeVisionTransformerPretrainedModel(Ernie4_5_VLMoeVisionTransformerPretrainedModel): ... class Ernie4_5_VL_MoeVariableResolutionResamplerModel(Ernie4_5_VLMoeVariableResolutionResamplerModel): ... class Ernie4_5_VL_MoeImageProcessor(Ernie4_5_VLMoeImageProcessor): ... class Ernie4_5_VL_MoeImageProcessorFast(Ernie4_5_VLMoeImageProcessorFast): ... class Ernie4_5_VLMoeModel(Qwen2VLModel): _checkpoint_conversion_mapping = {"^norm": "language_model.norm"} _no_split_modules = ["Ernie4_5_VLMoeDecoderLayer", "Ernie4_5_VLMoeVisionBlock"] def __init__(self, config: Ernie4_5_VLMoeConfig): super().__init__(config) del self.visual self.vision_tower = Ernie4_5_VLMoeVisionTransformerPretrainedModel._from_config(config.vision_config) self.resampler_model = Ernie4_5_VLMoeVariableResolutionResamplerModel(config) def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: torch.LongTensor | None, **kwargs) -> tuple | BaseModelOutputWithPooling: ... def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: torch.LongTensor | None, **kwargs) -> tuple | BaseModelOutputWithPooling: ... 
def forward(self, input_ids: torch.LongTensor, attention_mask: torch.Tensor | None, position_ids: torch.LongTensor | None, mm_token_type_ids: torch.IntTensor | None, moe_mm_token_type_ids: torch.IntTensor | None, past_key_values: Cache | None, inputs_embeds: torch.FloatTensor | None, use_cache: bool | None, pixel_values: torch.Tensor | None, pixel_values_videos: torch.FloatTensor | None, image_grid_thw: torch.LongTensor | None, video_grid_thw: torch.LongTensor | None, rope_deltas: torch.LongTensor | None, cache_position: torch.LongTensor | None, **kwargs) -> tuple | MoeModelOutputWithPast: ... # Task: Write a Python method `get_rope_index` for the class `Ernie4_5_VLMoeModel` to calculate the 3D rope index based on image and video's sizes. The utility expects a `vision + text`. Parameters: input_ids: torch.LongTensor, mm_token_type_ids: torch.IntTensor, image_grid_thw: torch.LongTensor | None, video_grid_thw: torch.LongTensor | None, attention_mask: torch.Tensor | None Returns: tuple[torch.Tensor, torch.Tensor]
def get_rope_index( self, input_ids: torch.LongTensor, mm_token_type_ids: torch.IntTensor, image_grid_thw: torch.LongTensor | None = None, video_grid_thw: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, **kwargs, ) -> tuple[torch.Tensor, torch.Tensor]: """ Calculate the 3D rope index based on image and video's sizes. The utility expects a `vision + text` sequence and will error out otherwise. For pure text sequence, please rely on model's auto-inferred position ids. In a mixed vision + text sequence, vision tokens use 3D RoPE (temporal, height, width) while text tokens use standard 1D RoPE. Example: Temporal patches: 3; Height patches: 2; Width patches: 2 Each vision input results in (temporal x height × width) positions. Here: 3 x 2 × 2 = 12 positions total. Temporal position IDs are spaced by: `interval = tokens_per_second * temporal_patch_size / fps` If fps = 1; tokens_per_second = 25; temporal_patch_size = 2, temporal IDs increase by 50 for each temporal patch: `[0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]` Height IDs repeat per row: `[0, 0, 1, 1, ...]` Width IDs alternate per column: `[0, 1, 0, 1, ...]` Text tokens follow standard 1D RoPE and the position IDs grow consequently with a step of `1` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`): Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2). image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. 
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ temporal_merge_size = self.config.vision_config.temporal_merge_size spatial_merge_size = self.config.vision_config.spatial_merge_size mrope_position_deltas = [] position_ids = torch.zeros( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) grid_iters = { 1: iter(image_grid_thw) if image_grid_thw is not None else None, 2: iter(video_grid_thw) if video_grid_thw is not None else None, } for batch_idx, current_input_ids in enumerate(input_ids): input_token_type = mm_token_type_ids[batch_idx] if attention_mask is not None: current_input_ids = current_input_ids[attention_mask[batch_idx].bool()] input_token_type = input_token_type[attention_mask[batch_idx].bool()] input_type_group = [] for key, group in itertools.groupby(enumerate(input_token_type.tolist()), lambda x: x[1]): group = list(group) start_index = group[0][0] end_index = group[-1][0] + 1 input_type_group.append((key, start_index, end_index)) current_pos = 0 llm_pos_ids_list = [] for modality_type, start_idx, end_idx in input_type_group: # text == 0 if modality_type == 0: text_len = end_idx - start_idx llm_pos_ids_list.append( torch.arange(text_len, device=input_ids.device).view(1, -1).expand(3, -1) + current_pos ) current_pos += text_len # image == 1, video == 2 else: grid_thw = next(grid_iters[modality_type]) t_merge_size = 1 if modality_type == 1 else temporal_merge_size vision_position_ids = self.get_vision_position_ids( current_pos, grid_thw, t_merge_size, spatial_merge_size, device=input_ids.device ) llm_pos_ids_list.append(vision_position_ids) current_pos += 
max(grid_thw[1], grid_thw[2]) // spatial_merge_size llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) if attention_mask is not None: position_ids[:, batch_idx, attention_mask[batch_idx].bool()] = llm_positions.to(position_ids.device) else: position_ids[:, batch_idx] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(current_input_ids)) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas
function_complex
0
{"cognitive_complexity": 20, "loc": 105, "code_loc": 50, "docstring_loc": 40, "function_name": "get_rope_index", "class_name": "Ernie4_5_VLMoeModel", "qualname": "Ernie4_5_VLMoeModel.get_rope_index", "file_path": "src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py:StreamingWorkflowCallbacks:class_doc
Write a class-level docstring for `StreamingWorkflowCallbacks` (inherits from SingleAgentWorkflowCallbacks) which has methods: `__init__`, `on_run`, `on_tool_call`, `on_handoff`, `on_turn_start`.
Custom callbacks to monitor the streaming voice workflow.
documentation
0
{"doc_type": "class", "class_name": "StreamingWorkflowCallbacks", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "char_length": 57, "methods": ["__init__", "on_run", "on_tool_call", "on_handoff", "on_turn_start", "on_turn_end"]}
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/cache/upload_cache.py:get_upload_cache
# Context: from typing import TYPE_CHECKING, Any from crewai_files.core.constants import DEFAULT_MAX_CACHE_ENTRIES, DEFAULT_TTL_SECONDS class CachedUpload: ... def _make_key(file_hash: str, provider: str) -> str: ... def _compute_file_hash_streaming(chunks: Iterator[bytes]) -> str: ... def _compute_file_hash(file: FileInput) -> str: ... class UploadCache: ... def reset_upload_cache() -> None: ... def _cleanup_on_exit() -> None: ... # Task: Write a Python function `get_upload_cache` to get or create the default upload cache. Parameters: ttl: int, namespace: str, cache_type: str Returns: UploadCache
def get_upload_cache( ttl: int = DEFAULT_TTL_SECONDS, namespace: str = "crewai_uploads", cache_type: str = "memory", **cache_kwargs: Any, ) -> UploadCache: """Get or create the default upload cache. Args: ttl: Default TTL in seconds. namespace: Cache namespace. cache_type: Backend type ("memory" or "redis"). **cache_kwargs: Additional args for cache backend. Returns: The upload cache instance. """ global _default_cache if _default_cache is None: _default_cache = UploadCache( ttl=ttl, namespace=namespace, cache_type=cache_type, **cache_kwargs, ) return _default_cache
function_simple
0
{"cognitive_complexity": 1, "loc": 26, "code_loc": 9, "docstring_loc": 11, "function_name": "get_upload_cache", "class_name": null, "qualname": "get_upload_cache", "file_path": "lib/crewai-files/src/crewai_files/cache/upload_cache.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/_common/tests/test_signature.py:TestValidateArgs.test_valid_keyword_args
# Context: from ray._common.signature import ( DUMMY_TYPE, extract_signature, flatten_args, get_signature, recover_args, validate_args, ) class TestGetSignature: ... class TestExtractSignature: ... class TestFlattenArgs: ... class TestRecoverArgs: ... class TestIntegration: ... class TestValidateArgs: def test_valid_positional_args(self): ... def test_valid_mixed_args(self): ... def test_too_many_positional_args(self): ... def test_missing_required_args(self): ... def test_unexpected_keyword_args(self): ... def test_duplicate_args(self): ... def test_varargs_validation(self): ... # Task: Write a Python test method `test_valid_keyword_args` in test class `TestValidateArgs` to test validation with valid keyword arguments. Module under test: typing, ray._common.signature
def test_valid_keyword_args(self): """Test validation with valid keyword arguments.""" def test_func(a, b=20, c=30): return a + b + c params = extract_signature(test_func) # Should not raise an exception validate_args(params, (1,), {"b": 2}) validate_args(params, (1,), {"b": 2, "c": 3}) validate_args(params, (), {"a": 1, "b": 2, "c": 3})
test
0
{"function_name": "test_valid_keyword_args", "class_name": "TestValidateArgs", "qualname": "TestValidateArgs.test_valid_keyword_args", "file_path": "python/ray/_common/tests/test_signature.py", "repo_id": "ray-project/ray", "loc": 11, "tested_modules": ["typing", "ray._common.signature"], "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:browser_use/browser/watchdogs/default_action_watchdog.py:DefaultActionWatchdog._get_key_code_for_char
Write a Python method `_get_key_code_for_char` for the class `DefaultActionWatchdog` to get the proper key code for a character (like Playwright does). Parameters: char: str Returns: str
def _get_key_code_for_char(self, char: str) -> str: """Get the proper key code for a character (like Playwright does).""" # Key code mapping for common characters (using proper base keys + modifiers) key_codes = { ' ': 'Space', '.': 'Period', ',': 'Comma', '-': 'Minus', '_': 'Minus', # Underscore uses Minus with Shift '@': 'Digit2', # @ uses Digit2 with Shift '!': 'Digit1', # ! uses Digit1 with Shift (not 'Exclamation') '?': 'Slash', # ? uses Slash with Shift ':': 'Semicolon', # : uses Semicolon with Shift ';': 'Semicolon', '(': 'Digit9', # ( uses Digit9 with Shift ')': 'Digit0', # ) uses Digit0 with Shift '[': 'BracketLeft', ']': 'BracketRight', '{': 'BracketLeft', # { uses BracketLeft with Shift '}': 'BracketRight', # } uses BracketRight with Shift '/': 'Slash', '\\': 'Backslash', '=': 'Equal', '+': 'Equal', # + uses Equal with Shift '*': 'Digit8', # * uses Digit8 with Shift '&': 'Digit7', # & uses Digit7 with Shift '%': 'Digit5', # % uses Digit5 with Shift '$': 'Digit4', # $ uses Digit4 with Shift '#': 'Digit3', # # uses Digit3 with Shift '^': 'Digit6', # ^ uses Digit6 with Shift '~': 'Backquote', # ~ uses Backquote with Shift '`': 'Backquote', "'": 'Quote', '"': 'Quote', # " uses Quote with Shift } # Numbers if char.isdigit(): return f'Digit{char}' # Letters if char.isalpha(): return f'Key{char.upper()}' # Special characters if char in key_codes: return key_codes[char] # Fallback for unknown characters return f'Key{char.upper()}'
function_simple
0
{"cognitive_complexity": 3, "loc": 50, "code_loc": 39, "docstring_loc": 1, "function_name": "_get_key_code_for_char", "class_name": "DefaultActionWatchdog", "qualname": "DefaultActionWatchdog._get_key_code_for_char", "file_path": "browser_use/browser/watchdogs/default_action_watchdog.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "self_contained"}
apache/airflow:helm-tests/tests/helm_tests/dagprocessor/test_labels_deployment.py:TestDagProcessorDeployment.test_should_add_global_labels
# Context: import jmespath from chart_utils.helm_template_generator import render_chart class TestDagProcessorDeployment: AIRFLOW_VERSION = "3.0.0" TEMPLATE_FILE = "templates/dag-processor/dag-processor-deployment.yaml" def test_should_add_component_specific_labels(self): ... def test_should_merge_global_and_component_specific_labels(self): ... def test_component_specific_labels_should_override_global_labels(self): ... # Task: Write a Python test method `test_should_add_global_labels` in test class `TestDagProcessorDeployment` to test adding only .Values.labels. Module under test: __future__, chart_utils.helm_template_generator
def test_should_add_global_labels(self): """Test adding only .Values.labels.""" docs = render_chart( values={ "airflowVersion": self.AIRFLOW_VERSION, "labels": {"test_global_label": "test_global_label_value"}, }, show_only=[self.TEMPLATE_FILE], ) assert "test_global_label" in jmespath.search("spec.template.metadata.labels", docs[0]) assert ( jmespath.search("spec.template.metadata.labels", docs[0])["test_global_label"] == "test_global_label_value" )
test
1
{"function_name": "test_should_add_global_labels", "class_name": "TestDagProcessorDeployment", "qualname": "TestDagProcessorDeployment.test_should_add_global_labels", "file_path": "helm-tests/tests/helm_tests/dagprocessor/test_labels_deployment.py", "repo_id": "apache/airflow", "loc": 15, "tested_modules": ["__future__", "chart_utils.helm_template_generator"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py:TestEcsExecutorTask.test_repr
# Context: from airflow.providers.amazon.aws.executors.ecs.utils import ( AllEcsConfigKeys, EcsExecutorException, EcsExecutorTask, EcsQueuedTask, EcsTaskCollection, EcsTaskInfo, RunTaskKwargsConfigKeys, _recursive_flatten_dict, camelize_dict_keys, parse_assign_public_ip, ) class TestEcsQueuedTask: ... class TestEcsTaskInfo: ... class TestRunTaskKwargsConfigKeys: ... class TestAllEcsConfigKeys: ... class TestEcsExecutorException: ... class TestEcsTaskCollection: ... class TestRecursiveFlattenDict: ... class TestParseAssignPublicIp: ... class TestCamelizeDictKeys: ... class TestEcsExecutorTask: def test_ecs_executor_task_creation(self): ... def test_get_task_state_running(self): ... def test_get_task_state_queued(self): ... def test_get_task_state_removed_timeout(self): ... def test_get_task_state_running_not_finished(self): ... def test_get_task_state_success(self): ... def test_get_task_state_failed(self): ... # Task: Write a Python test method `test_repr` in test class `TestEcsExecutorTask` to test __repr__ method. Module under test: __future__, airflow.models.taskinstance, airflow.providers.amazon.aws.executors.ecs.utils
def test_repr(self): """Test __repr__ method.""" task = EcsExecutorTask( task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task", last_status="RUNNING", desired_status="RUNNING", containers=[{"name": "container1", "exit_code": 0}], ) expected = "(arn:aws:ecs:us-east-1:123456789012:task/test-task, RUNNING->RUNNING, running)" assert repr(task) == expected
test
1
{"function_name": "test_repr", "class_name": "TestEcsExecutorTask", "qualname": "TestEcsExecutorTask.test_repr", "file_path": "providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py", "repo_id": "apache/airflow", "loc": 10, "tested_modules": ["__future__", "airflow.models.taskinstance", "airflow.providers.amazon.aws.executors.ecs.utils", "airflow.utils.state"], "has_docstring": true, "runnable_level": "project_runnable"}
unclecode/crawl4ai:tests/test_prefetch_mode.py:TestQuickExtractLinksEdgeCases.test_no_links_in_page
# Context: from crawl4ai.utils import quick_extract_links class TestQuickExtractLinks: ... class TestQuickExtractLinksEdgeCases: def test_links_in_nested_elements(self): ... def test_link_with_nested_elements(self): ... def test_protocol_relative_urls(self): ... def test_whitespace_in_href(self): ... # Task: Write a Python test method `test_no_links_in_page` in test class `TestQuickExtractLinksEdgeCases` to test page with no links. Module under test: crawl4ai.utils
def test_no_links_in_page(self): """Test page with no links.""" html = ''' <html> <body> <h1>No Links Here</h1> <p>Just some text content.</p> </body> </html> ''' result = quick_extract_links(html, "https://example.com") assert result["internal"] == [] assert result["external"] == []
test
1
{"function_name": "test_no_links_in_page", "class_name": "TestQuickExtractLinksEdgeCases", "qualname": "TestQuickExtractLinksEdgeCases.test_no_links_in_page", "file_path": "tests/test_prefetch_mode.py", "repo_id": "unclecode/crawl4ai", "loc": 14, "tested_modules": ["crawl4ai.utils"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/entrypoints/openai/test_completion_with_function_calling.py:test_no_args_tool_call
# Context: import datetime import openai # use the official client for correctness check import pytest def server(): ... async def client(server): ... async def test_function_tool_use(client: openai.AsyncOpenAI, model_name: str, stream: bool, tool_choice: str | dict, enable_thinking: bool): ... def k2_server(): ... async def k2_client(k2_server): ... async def test_tool_id_kimi_k2(k2_client: openai.AsyncOpenAI, model_name: str, stream: bool, tool_choice: str): ... async def test_named_tool_use(client: openai.AsyncOpenAI, sample_json_schema): ... async def test_inconsistent_tool_choice_and_tools(client: openai.AsyncOpenAI, sample_json_schema): ... # Task: Write a Python test function `test_no_args_tool_call` to verify the behavior of `no_args_tool_call`. Module under test: utils
async def test_no_args_tool_call( client: openai.AsyncOpenAI, model_name: str, arguments: str ): # Step 1: Define a tool that requires no parameters tools = [ { "type": "function", "function": { "name": "get_current_time", "description": "Get the current date and time. No parameters needed.", "parameters": { "type": "object", "properties": {}, # No parameters "required": [], # No required fields }, }, } ] messages = [{"role": "user", "content": "What time is it now?"}] # Step 2: Send user message and let model decide whether to call the tool response = await client.chat.completions.create( model=model_name, messages=messages, tools=tools, tool_choice="auto", # Let model choose automatically ) # Step 3: Check if model wants to call a tool message = response.choices[0].message if message.tool_calls: # Get the first tool call tool_call = message.tool_calls[0] tool_name = tool_call.function.name # Step 4: Execute the tool locally (no parameters) if tool_name == "get_current_time": # Test both empty string and "{}" for no-arg tool calls tool_call.function.arguments = arguments messages.append(message) current_time = datetime.datetime.now() result = current_time.isoformat() messages.append( { "role": "tool", "tool_call_id": tool_call.id, "content": result, } ) # Step 5: Send tool result back to model to continue conversation final_response = await client.chat.completions.create( model=model_name, messages=messages, ) # Output final natural language response assert final_response.choices[0].message.content is not None else: # No tool called — just print model's direct reply assert message.content is not None
test
1
{"function_name": "test_no_args_tool_call", "class_name": null, "qualname": "test_no_args_tool_call", "file_path": "tests/entrypoints/openai/test_completion_with_function_calling.py", "repo_id": "vllm-project/vllm", "loc": 58, "tested_modules": ["utils"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py:TavilyExtractorTool._run
# Context: import json class TavilyExtractorToolSchema(BaseModel): ... class TavilyExtractorTool(BaseTool): model_config = ConfigDict(arbitrary_types_allowed=True) def __init__(self, **kwargs: Any): """Initializes the TavilyExtractorTool. Args: **kwargs: Additional keyword arguments. """ super().__init__(**kwargs) if TAVILY_AVAILABLE: self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies) self.async_client = AsyncTavilyClient( api_key=self.api_key, proxies=self.proxies ) else: try: import subprocess import click except ImportError: raise ImportError( "The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. " "Please install 'tavily-python' manually (e.g., 'uv add tavily-python') and ensure 'click' and 'subprocess' are available." ) from None if click.confirm( "You are missing the 'tavily-python' package, which is required for TavilyExtractorTool. Would you like to install it?" ): try: subprocess.run(["uv pip", "install", "tavily-python"], check=True) # noqa: S607 raise ImportError( "'tavily-python' has been installed. Please restart your Python application to use the TavilyExtractorTool." ) except subprocess.CalledProcessError as e: raise ImportError( f"Attempted to install 'tavily-python' but failed: {e}. " f"Please install it manually to use the TavilyExtractorTool." ) from e else: raise ImportError( "The 'tavily-python' package is required to use the TavilyExtractorTool. " "Please install it with: uv add tavily-python" ) async def _arun(self, urls: list[str] | str) -> str: ... # Task: Write a Python method `_run` for the class `TavilyExtractorTool` to synchronously extracts content from the given URL(s). Parameters: urls: list[str] | str Returns: str
def _run( self, urls: list[str] | str, ) -> str: """Synchronously extracts content from the given URL(s). Args: urls: The URL(s) to extract data from. Returns: A JSON string containing the extracted data. """ if not self.client: raise ValueError( "Tavily client is not initialized. Ensure 'tavily-python' is installed and API key is set." ) return json.dumps( self.client.extract( urls=urls, extract_depth=self.extract_depth, include_images=self.include_images, timeout=self.timeout, ), indent=2, )
function_simple
0
{"cognitive_complexity": 1, "loc": 26, "code_loc": 13, "docstring_loc": 8, "function_name": "_run", "class_name": "TavilyExtractorTool", "qualname": "TavilyExtractorTool._run", "file_path": "lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnable"}
commaai/openpilot:selfdrive/ui/translations/potools.py:merge_po
# Context: from pathlib import Path class POEntry: ... def _parse_quoted(s: str) -> str: ... def parse_po(path: str | Path) -> tuple[POEntry | None, list[POEntry]]: ... def _quote(s: str) -> str: ... def write_po(path: str | Path, header: POEntry | None, entries: list[POEntry]) -> None: ... def extract_strings(files: list[str], basedir: str) -> list[POEntry]: ... def generate_pot(entries: list[POEntry], pot_path: str | Path) -> None: ... def init_po(pot_path: str | Path, po_path: str | Path, language: str) -> None: ... # Task: Write a Python function `merge_po` to update a .po file with entries from a .pot template (replaces msgmerge --update). Parameters: po_path: str | Path, pot_path: str | Path Returns: None
def merge_po(po_path: str | Path, pot_path: str | Path) -> None: """Update a .po file with entries from a .pot template (replaces msgmerge --update).""" po_header, po_entries = parse_po(po_path) _, pot_entries = parse_po(pot_path) existing = {e.msgid: e for e in po_entries} merged = [] for pot_e in pot_entries: if pot_e.msgid in existing: old = existing[pot_e.msgid] old.source_refs = pot_e.source_refs old.flags = pot_e.flags old.comments = pot_e.comments if pot_e.is_plural: old.msgid_plural = pot_e.msgid_plural merged.append(old) else: merged.append(pot_e) merged.sort(key=lambda e: e.msgid) write_po(po_path, po_header, merged)
function_complex
0
{"cognitive_complexity": 7, "loc": 22, "code_loc": 17, "docstring_loc": 1, "function_name": "merge_po", "class_name": null, "qualname": "merge_po", "file_path": "selfdrive/ui/translations/potools.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_level": "file_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/utils/helpers.py:get_mime_type
# Context: import mimetypes from pathlib import Path def build_content_type_from_extension(extension: str): ... # Task: Write a Python function `get_mime_type` to get the MIME type of a file based on its extension. Parameters: file_path: str | Path Returns: str
def get_mime_type(file_path: str | Path) -> str: """Get the MIME type of a file based on its extension. Args: file_path: Path to the file Returns: MIME type string (e.g., 'image/jpeg', 'image/png') Raises: ValueError: If MIME type cannot be determined """ mime_type, _ = mimetypes.guess_type(str(file_path)) if mime_type is None: msg = f"Could not determine MIME type for: {file_path}" raise ValueError(msg) return mime_type
function_simple
1
{"cognitive_complexity": 1, "loc": 17, "code_loc": 5, "docstring_loc": 11, "function_name": "get_mime_type", "class_name": null, "qualname": "get_mime_type", "file_path": "src/lfx/src/lfx/utils/helpers.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "slib_runnable"}
sansan0/TrendRadar:mcp_server/server.py:read_article
# Context: import asyncio import json def _get_tools(project_root: Optional[str]): ... async def get_platforms_resource() -> str: ... async def get_rss_feeds_resource() -> str: ... async def get_available_dates_resource() -> str: ... async def get_keywords_resource() -> str: ... async def resolve_date_range(expression: str) -> str: ... async def get_latest_news(platforms: Optional[List[str]], limit: int, include_url: bool) -> str: ... async def get_trending_topics(top_n: int, mode: str, extract_mode: str) -> str: ... async def get_latest_rss(feeds: Optional[List[str]], days: int, limit: int, include_summary: bool) -> str: ... async def search_rss(keyword: str, feeds: Optional[List[str]], days: int, limit: int, include_summary: bool) -> str: ... async def get_rss_feeds_status() -> str: ... async def get_news_by_date(date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], limit: int, include_url: bool) -> str: ... async def analyze_topic_trend(topic: str, analysis_type: str, date_range: Optional[Union[Dict[str, str], str]], granularity: str, spike_threshold: float, time_window: int, lookahead_hours: int, confidence_threshold: float) -> str: ... async def analyze_data_insights(insight_type: str, topic: Optional[str], date_range: Optional[Union[Dict[str, str], str]], min_frequency: int, top_n: int) -> str: ... async def analyze_sentiment(topic: Optional[str], platforms: Optional[List[str]], date_range: Optional[Union[Dict[str, str], str]], limit: int, sort_by_weight: bool, include_url: bool) -> str: ... async def find_related_news(reference_title: str, date_range: Optional[Union[Dict[str, str], str]], threshold: float, limit: int, include_url: bool) -> str: ... async def generate_summary_report(report_type: str, date_range: Optional[Union[Dict[str, str], str]]) -> str: ... 
async def aggregate_news(date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], similarity_threshold: float, limit: int, include_url: bool) -> str: ... async def compare_periods(period1: Union[Dict[str, str], str], period2: Union[Dict[str, str], str], topic: Optional[str], compare_type: str, platforms: Optional[List[str]], top_n: int) -> str: ... async def search_news(query: str, search_mode: str, date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], limit: int, sort_by: str, threshold: float, include_url: bool, include_rss: bool, rss_limit: int) -> str: ... async def get_current_config(section: str) -> str: ... async def get_system_status() -> str: ... async def check_version(proxy_url: Optional[str]) -> str: ... async def trigger_crawl(platforms: Optional[List[str]], save_to_local: bool, include_url: bool) -> str: ... async def sync_from_remote(days: int) -> str: ... async def get_storage_status() -> str: ... async def list_available_dates(source: str) -> str: ... async def read_articles_batch(urls: List[str], timeout: int) -> str: ... async def get_channel_format_guide(channel: Optional[str]) -> str: ... async def get_notification_channels() -> str: ... async def send_notification(message: str, title: str, channels: Optional[List[str]]) -> str: ... def run_server(project_root: Optional[str], transport: str, host: str, port: int): ... # Task: Write a Python async function `read_article` to 读取指定 URL 的文章内容,返回 LLM 友好的 Markdown 格式. Parameters: url: str, timeout: int Returns: str
async def read_article( url: str, timeout: int = 30 ) -> str: """ 读取指定 URL 的文章内容,返回 LLM 友好的 Markdown 格式 通过 Jina AI Reader 将网页转换为干净的 Markdown,自动去除广告、导航栏等噪音内容。 适合用于:阅读新闻正文、获取文章详情、分析文章内容。 **典型使用流程:** 1. 先用 search_news(include_url=True) 搜索新闻获取链接 2. 再用 read_article(url=链接) 读取正文内容 3. AI 对 Markdown 正文进行分析、摘要、翻译等 Args: url: 文章链接(必需),以 http:// 或 https:// 开头 timeout: 请求超时时间(秒),默认 30,最大 60 Returns: JSON格式的文章内容,包含完整 Markdown 正文 Examples: - read_article(url="https://example.com/news/123") Note: - 使用 Jina AI Reader 免费服务(100 RPM 限制) - 每次请求间隔 5 秒(内置速率控制) - 部分付费墙/登录墙页面可能无法完整获取 """ tools = _get_tools() timeout = min(max(timeout, 10), 60) result = await asyncio.to_thread( tools['article'].read_article, url=url, timeout=timeout ) return json.dumps(result, ensure_ascii=False, indent=2)
function_simple
1
{"cognitive_complexity": 0, "loc": 37, "code_loc": 7, "docstring_loc": 26, "function_name": "read_article", "class_name": null, "qualname": "read_article", "file_path": "mcp_server/server.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/devtools/src/crewai_devtools/cli.py:get_github_contributors
# Context: import subprocess from github import Github def run_command(cmd: list[str], cwd: Path | None) -> str: ... def check_gh_installed() -> None: ... def check_git_clean() -> None: ... def update_version_in_file(file_path: Path, new_version: str) -> bool: ... def update_pyproject_dependencies(file_path: Path, new_version: str) -> bool: ... def add_docs_version(docs_json_path: Path, version: str) -> bool: ... def translate_release_notes(release_notes: str, lang: str, client: OpenAI) -> str: ... def _format_changelog_date(lang: str) -> str: ... def update_changelog(changelog_path: Path, version: str, release_notes: str, lang: str) -> bool: ... def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]: ... def find_version_files(base_path: Path) -> list[Path]: ... def get_packages(lib_dir: Path) -> list[Path]: ... def get_commits_from_last_tag(tag_name: str, version: str) -> tuple[str, str]: ... def cli() -> None: ... def bump(version: str, dry_run: bool, no_push: bool, no_commit: bool) -> None: ... def tag(dry_run: bool, no_edit: bool) -> None: ... def main() -> None: ... # Task: Write a Python function `get_github_contributors` to get GitHub usernames from commit range using GitHub API. Parameters: commit_range: str Returns: list[str]
def get_github_contributors(commit_range: str) -> list[str]: """Get GitHub usernames from commit range using GitHub API. Args: commit_range: Git commit range (e.g., "abc123..HEAD"). Returns: List of GitHub usernames sorted alphabetically. """ try: # Get GitHub token from gh CLI try: gh_token = run_command(["gh", "auth", "token"]) except subprocess.CalledProcessError: gh_token = None g = Github(login_or_token=gh_token) if gh_token else Github() github_repo = g.get_repo("crewAIInc/crewAI") commit_shas = run_command( ["git", "log", commit_range, "--pretty=format:%H"] ).split("\n") contributors = set() for sha in commit_shas: if not sha: continue try: commit = github_repo.get_commit(sha) if commit.author and commit.author.login: contributors.add(commit.author.login) if commit.commit.message: for line in commit.commit.message.split("\n"): if line.strip().startswith("Co-authored-by:"): if "<" in line and ">" in line: email_part = line.split("<")[1].split(">")[0] if "@users.noreply.github.com" in email_part: username = email_part.split("+")[-1].split("@")[0] contributors.add(username) except Exception: # noqa: S112 continue return sorted(list(contributors)) except Exception as e: console.print( f"[yellow]Warning:[/yellow] Could not fetch GitHub contributors: {e}" ) return []
function_complex
0
{"cognitive_complexity": 46, "loc": 50, "code_loc": 34, "docstring_loc": 8, "function_name": "get_github_contributors", "class_name": null, "qualname": "get_github_contributors", "file_path": "lib/devtools/src/crewai_devtools/cli.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_lmcache_connector_v1.py:TestLMCacheConnectorV1Backend.test_setup_with_existing_port
# Context: class TestLMCacheConnectorV1Backend: def mock_lmcache_check(self): ... def lmcache_backend_basic(self): ... def lmcache_backend_with_extra(self): ... def lmcache_backend_with_port(self): ... def test_setup_basic_config(self, lmcache_backend_basic): ... def test_setup_with_extra_config_no_port(self, lmcache_backend_with_extra): ... # Task: Write a Python test method `test_setup_with_existing_port` in test class `TestLMCacheConnectorV1Backend` to test setup with existing lmcache_rpc_port configuration. Module under test: ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache, ray.serve.llm
def test_setup_with_existing_port(self, lmcache_backend_with_port): """Test setup with existing lmcache_rpc_port configuration.""" original_port = lmcache_backend_with_port.kv_transfer_config[ "kv_connector_extra_config" ]["lmcache_rpc_port"] lmcache_backend_with_port.setup() # Should modify the existing port by appending random string new_port = lmcache_backend_with_port.kv_transfer_config[ "kv_connector_extra_config" ]["lmcache_rpc_port"] assert new_port.startswith(original_port) assert len(new_port) > len(original_port) # Should have random string appended
test
0
{"function_name": "test_setup_with_existing_port", "class_name": "TestLMCacheConnectorV1Backend", "qualname": "TestLMCacheConnectorV1Backend.test_setup_with_existing_port", "file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_lmcache_connector_v1.py", "repo_id": "ray-project/ray", "loc": 14, "tested_modules": ["ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache", "ray.serve.llm"], "has_docstring": true, "runnable_level": "file_runnable"}
fastapi/fastapi:tests/test_dependency_paramless.py:test_get_credentials
# Context: def process_auth(credentials: Annotated[str | None, Security(oauth2_scheme)], security_scopes: SecurityScopes): ... def get_credentials(credentials: Annotated[dict, Security(process_auth, scopes=['a', 'b'])]): ... def get_parameterless_with_scopes(): ... def get_parameterless_without_scopes(): ... def test_parameterless_with_scopes(): ... def test_parameterless_without_scopes(): ... def test_call_get_parameterless_without_scopes_for_coverage(): ... # Task: Write a Python test function `test_get_credentials` to verify the behavior of `get_credentials`. Module under test: typing, fastapi, fastapi.security
def test_get_credentials(): response = client.get("/get-credentials", headers={"authorization": "Bearer token"}) assert response.status_code == 200, response.text assert response.json() == {"token": "token", "scopes": ["a", "b"]}
test
1
{"function_name": "test_get_credentials", "class_name": null, "qualname": "test_get_credentials", "file_path": "tests/test_dependency_paramless.py", "repo_id": "fastapi/fastapi", "loc": 4, "tested_modules": ["typing", "fastapi", "fastapi.security", "fastapi.testclient"], "has_docstring": false, "runnable_level": "file_runnable"}
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/web_search_mcp_server.py:_http_get_text
# Context: import httpx def _get_robots_txt_url(url: str) -> str: ... async def _check_may_fetch_url(url: str, user_agent: str) -> None: ... def _extract_markdown_from_html(html: str) -> str: ... def _truncate(content: str, max_length: int, start_index: int) -> tuple[str, str]: ... async def brave_search(query: str, num_results: int) -> str: ... async def fetch_url(url: str, max_length: int, start_index: int, raw: bool, ignore_robots_txt: bool) -> str: ... async def lifespan(app: FastAPI): ... class WebSearchMCP: ... # Task: Write a Python async function `_http_get_text` to fetch URL. Returns (text, content_type). Parameters: url: str Returns: tuple[str, str]
async def _http_get_text(url: str, *, user_agent: str, timeout_s: float = 30.0, headers: dict | None = None) -> tuple[str, str]: """ Fetch URL. Returns (text, content_type). """ request_headers = {"User-Agent": user_agent} if headers: request_headers.update(headers) async with httpx.AsyncClient(proxy=PROXY_URL, timeout=timeout_s, follow_redirects=True) as client: resp = await client.get(url, headers=request_headers) resp.raise_for_status() return resp.text, (resp.headers.get("content-type") or "")
function_simple
0
{"cognitive_complexity": 2, "loc": 11, "code_loc": 7, "docstring_loc": 3, "function_name": "_http_get_text", "class_name": null, "qualname": "_http_get_text", "file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/mcps/web_search_mcp_server.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:browser_use/llm/tests/test_anthropic_cache.py:TestAnthropicCache.test_cache_only_last_tool_call
# Context: from browser_use.llm.anthropic.serializer import AnthropicMessageSerializer, NonSystemMessage from browser_use.llm.messages import ( AssistantMessage, BaseMessage, ContentPartImageParam, ContentPartTextParam, Function, ImageURL, SystemMessage, ToolCall, UserMessage, ) class TestAnthropicCache: def test_cache_basic_functionality(self): ... def test_cache_with_tool_calls(self): ... def test_cache_with_images(self): ... def test_cache_with_base64_images(self): ... def test_cache_content_types(self): ... def test_assistant_cache_empty_content(self): ... def test_mixed_cache_scenarios(self): ... def test_system_message_cache_behavior(self): ... def test_agent_messages_integration(self): ... def test_cache_cleaning_last_message_only(self): ... def test_cache_cleaning_with_system_message(self): ... def test_cache_cleaning_no_cached_messages(self): ... def test_max_4_cache_blocks(self): ... def test_cache_only_last_block_in_message(self): ... def test_cache_assistant_with_content_and_tools(self): ... # Task: Write a Python test method `test_cache_only_last_tool_call` in test class `TestAnthropicCache` to test that only the LAST tool_use block gets cache_control. Module under test: typing, browser_use.agent.service, browser_use.llm.anthropic.chat
def test_cache_only_last_tool_call(self): """Test that only the LAST tool_use block gets cache_control.""" tool_calls = [ ToolCall(id='id1', function=Function(name='func1', arguments='{"arg": "1"}')), ToolCall(id='id2', function=Function(name='func2', arguments='{"arg": "2"}')), ToolCall(id='id3', function=Function(name='func3', arguments='{"arg": "3"}')), ] assistant_msg = AssistantMessage(content=None, tool_calls=tool_calls, cache=True) serialized = AnthropicMessageSerializer.serialize(assistant_msg) assert isinstance(serialized['content'], list) content_blocks = serialized['content'] # Count tool_use blocks with cache_control # Note: content_blocks are dicts at runtime despite type annotations cache_count = sum(1 for block in content_blocks if block.get('cache_control') is not None) # type: ignore[attr-defined] assert cache_count == 1, f'Expected 1 cache_control block, got {cache_count}' # Verify it's the last tool_use block assert content_blocks[-1].get('cache_control') is not None # type: ignore[attr-defined] assert content_blocks[0].get('cache_control') is None # type: ignore[attr-defined] assert content_blocks[1].get('cache_control') is None # type: ignore[attr-defined]
test
0
{"function_name": "test_cache_only_last_tool_call", "class_name": "TestAnthropicCache", "qualname": "TestAnthropicCache.test_cache_only_last_tool_call", "file_path": "browser_use/llm/tests/test_anthropic_cache.py", "repo_id": "browser-use/browser-use", "loc": 22, "tested_modules": ["typing", "browser_use.agent.service", "browser_use.llm.anthropic.chat", "browser_use.llm.anthropic.serializer", "browser_use.llm.messages"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/serve/tests/unit/test_grpc_replica_result.py:TestSeparateLoop.test_streaming_sync
# Context: import asyncio from ray._common.test_utils import wait_for_condition class FakegRPCUnaryCall: ... class FakegRPCStreamCall: ... def create_asyncio_event_loop_in_thread(): ... class TestSameLoop: ... class TestSeparateLoop: async def make_fake_unary_request(self, data, loop: asyncio.AbstractEventLoop): ... async def make_fake_streaming_request(self, data, loop: asyncio.AbstractEventLoop, on_separate_loop: bool, is_streaming: bool, event: threading.Event, error): ... def test_unary_sync(self, create_asyncio_event_loop_in_thread): ... async def test_unary_async(self, create_asyncio_event_loop_in_thread): ... async def test_streaming_async(self, create_asyncio_event_loop_in_thread): ... async def test_streaming_blocked(self, create_asyncio_event_loop_in_thread): ... def test_unary_with_gen_sync(self, create_asyncio_event_loop_in_thread): ... async def test_unary_with_gen_async(self, create_asyncio_event_loop_in_thread): ... async def test_unary_with_gen_blocked(self, create_asyncio_event_loop_in_thread): ... def test_unary_with_timeout(self, create_asyncio_event_loop_in_thread): ... def test_unary_error_sync(self, create_asyncio_event_loop_in_thread): ... async def test_unary_error_async(self, create_asyncio_event_loop_in_thread): ... def test_streaming_error_sync(self, create_asyncio_event_loop_in_thread): ... async def test_streaming_error_async(self, create_asyncio_event_loop_in_thread): ... # Task: Write a Python test method `test_streaming_sync` in test class `TestSeparateLoop` to verify the behavior of `streaming_sync`. Module under test: ray, ray._common.test_utils, ray.serve._private.common
def test_streaming_sync(self, create_asyncio_event_loop_in_thread): loop, _ = create_asyncio_event_loop_in_thread # Instantiate gRPCReplicaResult with FakegRPCStreamCall. This needs # to be run on the "other loop" fut = asyncio.run_coroutine_threadsafe( self.make_fake_streaming_request([1, 2, 3, 4], loop, on_separate_loop=True), loop=loop, ) replica_result = fut.result() # The async generator should be consumed even if we don't fetch # the items explicitly through the ReplicaResult object wait_for_condition(replica_result._call.is_empty, retry_interval_ms=10) # Finally, check results given by gRPCReplicaResult fetched from # the queue are correct assert list(replica_result) == [1, 2, 3, 4]
test
0
{"function_name": "test_streaming_sync", "class_name": "TestSeparateLoop", "qualname": "TestSeparateLoop.test_streaming_sync", "file_path": "python/ray/serve/tests/unit/test_grpc_replica_result.py", "repo_id": "ray-project/ray", "loc": 18, "tested_modules": ["ray", "ray._common.test_utils", "ray.serve._private.common", "ray.serve._private.replica_result", "ray.serve.generated"], "has_docstring": false, "runnable_level": "file_runnable"}
Comfy-Org/ComfyUI:tests-unit/assets_test/test_uploads.py:test_upload_models_unknown_category
# Context: import json import requests def test_upload_ok_duplicate_reference(http: requests.Session, api_base: str, make_asset_bytes): ... def test_upload_fastpath_from_existing_hash_no_file(http: requests.Session, api_base: str): ... def test_upload_fastpath_with_known_hash_and_file(http: requests.Session, api_base: str): ... def test_upload_multiple_tags_fields_are_merged(http: requests.Session, api_base: str): ... def test_concurrent_upload_identical_bytes_different_names(root: str, http: requests.Session, api_base: str, make_asset_bytes): ... def test_create_from_hash_endpoint_404(http: requests.Session, api_base: str): ... def test_upload_zero_byte_rejected(http: requests.Session, api_base: str): ... def test_upload_invalid_root_tag_rejected(http: requests.Session, api_base: str): ... def test_upload_user_metadata_must_be_json(http: requests.Session, api_base: str): ... def test_upload_requires_multipart(http: requests.Session, api_base: str): ... def test_upload_missing_file_and_hash(http: requests.Session, api_base: str): ... def test_upload_models_requires_category(http: requests.Session, api_base: str): ... def test_upload_tags_traversal_guard(http: requests.Session, api_base: str): ... def test_duplicate_upload_same_display_name_does_not_clobber(root: str, http: requests.Session, api_base: str, asset_factory, make_asset_bytes): ... # Task: Write a Python test function `test_upload_models_unknown_category` to verify the behavior of `upload_models_unknown_category`. Module under test: concurrent.futures
def test_upload_models_unknown_category(http: requests.Session, api_base: str): files = {"file": ("m.safetensors", b"A" * 128, "application/octet-stream")} form = {"tags": json.dumps(["models", "no_such_category", "unit-tests"]), "name": "m.safetensors"} r = http.post(api_base + "/api/assets", data=form, files=files, timeout=120) body = r.json() assert r.status_code == 400 assert body["error"]["code"] == "INVALID_BODY" assert body["error"]["message"].startswith("unknown models category")
test
1
{"function_name": "test_upload_models_unknown_category", "class_name": null, "qualname": "test_upload_models_unknown_category", "file_path": "tests-unit/assets_test/test_uploads.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 8, "tested_modules": ["concurrent.futures"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/events/test_event_ordering.py:TestTriggeredByEventId.test_exception_handling_triggered_by
# Context: import pytest from crewai.events.base_events import BaseEvent from crewai.events.event_bus import crewai_event_bus from crewai.events.types.flow_events import ( FlowFinishedEvent, FlowStartedEvent, MethodExecutionFinishedEvent, MethodExecutionStartedEvent, ) from crewai.flow.flow import Flow, listen, start from crewai.events.base_events import reset_emission_counter from crewai.events.event_context import reset_last_event_id from crewai.events.types.flow_events import MethodExecutionFailedEvent class EventCollector: ... def collector() -> EventCollector: ... class TestCrewEventOrdering: ... class TestAgentEventOrdering: ... class TestFlowWithCrewEventOrdering: ... class TestFlowWithMultipleCrewsEventOrdering: ... class TestPreviousEventIdChain: ... class TestCrewEventsInFlowTriggeredBy: ... class TestTriggeredByEventId: async def test_triggered_by_event_id_for_listeners(self) -> None: ... async def test_start_method_has_no_triggered_by(self) -> None: ... async def test_chained_listeners_triggered_by(self) -> None: ... async def test_parallel_listeners_same_trigger(self) -> None: ... async def test_or_condition_triggered_by(self) -> None: ... async def test_router_triggered_by(self) -> None: ... async def test_multiple_kickoffs_maintain_chains(self) -> None: ... async def test_parallel_flows_maintain_separate_triggered_by_chains(self) -> None: ... async def test_and_condition_triggered_by_last_method(self) -> None: ... async def test_sync_method_in_flow_triggered_by(self) -> None: ... async def test_multiple_start_methods_triggered_by(self) -> None: ... async def test_none_return_triggered_by(self) -> None: ... async def test_deeply_nested_chain_triggered_by(self) -> None: ... async def test_router_conditional_path_triggered_by(self) -> None: ... # Task: Write a Python test method `test_exception_handling_triggered_by` in test class `TestTriggeredByEventId` to events emitted after exception should still have correct triggered_by. 
Module under test: crewai.agent, crewai.crew, crewai.events.base_events
async def test_exception_handling_triggered_by(self) -> None: """Events emitted after exception should still have correct triggered_by.""" from crewai.events.base_events import reset_emission_counter from crewai.events.event_context import reset_last_event_id from crewai.events.types.flow_events import MethodExecutionFailedEvent reset_emission_counter() reset_last_event_id() events: list[BaseEvent] = [] class ExceptionFlow(Flow): @start() async def will_fail(self): raise ValueError("intentional error") with crewai_event_bus.scoped_handlers(): @crewai_event_bus.on(MethodExecutionStartedEvent) def capture_started(source, event): events.append(event) @crewai_event_bus.on(MethodExecutionFinishedEvent) def capture_finished(source, event): events.append(event) @crewai_event_bus.on(MethodExecutionFailedEvent) def capture_failed(source, event): events.append(event) @crewai_event_bus.on(FlowStartedEvent) def capture_flow_started(source, event): events.append(event) flow = ExceptionFlow() try: await flow.akickoff() except ValueError: pass # Expected crewai_event_bus.flush() # Even with exception, events should have proper previous_event_id chain all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0) for event in all_sorted[1:]: assert event.previous_event_id is not None, ( f"Event {event.type} (seq {event.emission_sequence}) should have previous_event_id" )
test
0
{"function_name": "test_exception_handling_triggered_by", "class_name": "TestTriggeredByEventId", "qualname": "TestTriggeredByEventId.test_exception_handling_triggered_by", "file_path": "lib/crewai/tests/events/test_event_ordering.py", "repo_id": "crewAIInc/crewAI", "loc": 47, "tested_modules": ["crewai.agent", "crewai.crew", "crewai.events.base_events", "crewai.events.event_bus", "crewai.events.types.agent_events"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/events/test_depends.py:module_doc
Write a module-level docstring for the Python module `test_depends` which contains class `DependsTestEvent`.
Tests for FastAPI-style dependency injection in event handlers.
documentation
0
{"doc_type": "module", "module_name": "test_depends", "file_path": "lib/crewai/tests/events/test_depends.py", "repo_id": "crewAIInc/crewAI", "char_length": 63}
apache/airflow:providers/google/tests/unit/google/cloud/operators/test_gen_ai.py:TestGenAIGeminiCreateEmbeddingsBatchJobOperator.test__wait_until_complete_exception_raises_airflow_exception
# Context: from unittest import mock import pytest from airflow.exceptions import AirflowException from airflow.providers.google.cloud.operators.gen_ai import ( GenAICountTokensOperator, GenAICreateCachedContentOperator, GenAIGeminiCancelBatchJobOperator, GenAIGeminiCreateBatchJobOperator, GenAIGeminiCreateEmbeddingsBatchJobOperator, GenAIGeminiDeleteBatchJobOperator, GenAIGeminiDeleteFileOperator, GenAIGeminiGetBatchJobOperator, GenAIGeminiGetFileOperator, GenAIGeminiListBatchJobsOperator, GenAIGeminiListFilesOperator, GenAIGeminiUploadFileOperator, GenAIGenerateContentOperator, GenAIGenerateEmbeddingsOperator, GenAISupervisedFineTuningTrainOperator, ) def assert_warning(msg: str, warnings): ... class TestGenAIGenerateEmbeddingsOperator: ... class TestGenAIGenerateContentOperator: ... class TestGenAISupervisedFineTuningTrainOperator: ... class TestGenAICountTokensOperator: ... class TestGenAICreateCachedContentOperator: ... class TestGenAIGenerateFromCachedContentOperator: ... class TestGenAIGeminiCreateBatchJobOperator: ... class TestGenAIGeminiGetBatchJobOperator: ... class TestGenAIGeminiListBatchJobsOperator: ... class TestGenAIGeminiDeleteBatchJobOperator: ... class TestGenAIGeminiCancelBatchJobOperator: ... class TestGenAIGeminiGetFileOperator: ... class TestGenAIGeminiUploadFileOperator: ... class TestGenAIGeminiListFilesOperator: ... class TestGenAIGeminiDeleteFileOperator: ... class TestGenAIGeminiCreateEmbeddingsBatchJobOperator: def test_execute(self, mock_hook): ... def test_init_retrieve_result_and_not_wait_until_complete_raises_airflow_exception(self): ... def test_init_input_source_not_string_raises_airflow_exception(self): ... def test_init_results_folder_not_exists_raises_airflow_exception(self): ... def test_execute_exception_error_raises_airflow_exception(self, mock_hook): ... def test_execute_complete_error_status_raises_airflow_exception(self): ... 
# Task: Write a Python test method `test__wait_until_complete_exception_raises_airflow_exception` in test class `TestGenAIGeminiCreateEmbeddingsBatchJobOperator` to verify the behavior of `_wait_until_complete_exception_raises_airflow_exception`. Module under test: __future__, google.genai.errors, google.genai.types
def test__wait_until_complete_exception_raises_airflow_exception(self, mock_hook): op = GenAIGeminiCreateEmbeddingsBatchJobOperator( task_id=TASK_ID, project_id=GCP_PROJECT, location=GCP_LOCATION, input_source=TEST_EMBEDDINGS_JOB_INLINED_REQUESTS, model=EMBEDDING_MODEL, gemini_api_key=TEST_GEMINI_API_KEY, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_hook.return_value.get_batch_job.side_effect = Exception() with pytest.raises(AirflowException): op._wait_until_complete(job=mock.MagicMock())
test
1
{"function_name": "test__wait_until_complete_exception_raises_airflow_exception", "class_name": "TestGenAIGeminiCreateEmbeddingsBatchJobOperator", "qualname": "TestGenAIGeminiCreateEmbeddingsBatchJobOperator.test__wait_until_complete_exception_raises_airflow_exception", "file_path": "providers/google/tests/unit/google/cloud/operators/test_gen_ai.py", "repo_id": "apache/airflow", "loc": 16, "tested_modules": ["__future__", "google.genai.errors", "google.genai.types", "airflow.exceptions", "airflow.providers.google.cloud.operators.gen_ai"], "has_docstring": false, "runnable_level": "project_runnable"}
github/spec-kit:src/specify_cli/extensions.py:ExtensionManager:class_doc
Write a class-level docstring for `ExtensionManager` which has methods: `__init__`, `check_compatibility`, `install_from_directory`, `install_from_zip`, `remove`.
Manages extension lifecycle: installation, removal, updates.
documentation
0
{"doc_type": "class", "class_name": "ExtensionManager", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "char_length": 60, "methods": ["__init__", "check_compatibility", "install_from_directory", "install_from_zip", "remove", "list_installed", "get_extension"]}
paperless-ngx/paperless-ngx:src/documents/tests/test_api_document_versions.py:TestDocumentVersioningApi.test_root_endpoint_returns_403_when_user_lacks_permission
# Context: from django.contrib.auth.models import Permission from django.contrib.auth.models import User from rest_framework import status from documents.models import Document class TestVersionAwareFilters(TestCase): ... class TestDocumentVersioningApi(DirectoriesMixin, APITestCase): def setUp(self) -> None: ... def _make_pdf_upload(self, name: str) -> SimpleUploadedFile: ... def _write_file(self, path: Path, content: bytes) -> None: ... def _create_pdf(self, title: str, checksum: str, root_document: Document | None) -> Document: ... def test_root_endpoint_returns_root_for_version_and_root(self) -> None: ... def test_root_endpoint_returns_404_for_missing_document(self) -> None: ... def test_delete_version_disallows_deleting_root(self) -> None: ... def test_delete_version_deletes_version_and_returns_current_version(self) -> None: ... def test_delete_version_writes_audit_log_entry(self) -> None: ... def test_delete_version_returns_404_when_version_not_related(self) -> None: ... def test_delete_version_accepts_version_id_as_root_parameter(self) -> None: ... def test_delete_version_returns_404_when_root_missing(self) -> None: ... def test_delete_version_reindexes_root_document(self) -> None: ... def test_delete_version_returns_403_without_permission(self) -> None: ... def test_delete_version_returns_404_when_version_missing(self) -> None: ... def test_update_version_label_updates_and_trims(self) -> None: ... def test_update_version_label_clears_on_blank(self) -> None: ... def test_update_version_label_returns_403_without_permission(self) -> None: ... def test_update_version_label_returns_404_for_unrelated_version(self) -> None: ... def test_download_version_param_errors(self) -> None: ... def test_download_preview_thumb_with_version_param(self) -> None: ... def test_metadata_version_param_uses_version(self) -> None: ... def test_metadata_version_param_errors(self) -> None: ... def test_metadata_returns_403_when_user_lacks_permission(self) -> None: ... 
def test_update_version_enqueues_consume_with_overrides(self) -> None: ... def test_update_version_with_version_pk_normalizes_to_root(self) -> None: ... def test_update_version_returns_500_on_consume_failure(self) -> None: ... def test_update_version_returns_403_without_permission(self) -> None: ... def test_update_version_returns_404_for_missing_document(self) -> None: ... def test_update_version_requires_document(self) -> None: ... def test_patch_content_updates_latest_version_content(self) -> None: ... def test_patch_content_updates_selected_version_content(self) -> None: ... def test_retrieve_returns_latest_version_content(self) -> None: ... def test_retrieve_with_version_param_returns_selected_version_content(self) -> None: ... # Task: Write a Python test method `test_root_endpoint_returns_403_when_user_lacks_permission` in test class `TestDocumentVersioningApi` to verify the behavior of `root_endpoint_returns_403_when_user_lacks_permission`. Module under test: __future__, typing, auditlog.models
def test_root_endpoint_returns_403_when_user_lacks_permission(self) -> None: owner = User.objects.create_user(username="owner") viewer = User.objects.create_user(username="viewer") viewer.user_permissions.add( Permission.objects.get(codename="view_document"), ) root = Document.objects.create( title="root", checksum="root", mime_type="application/pdf", owner=owner, ) self.client.force_authenticate(user=viewer) resp = self.client.get(f"/api/documents/{root.id}/root/") self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
test
1
{"function_name": "test_root_endpoint_returns_403_when_user_lacks_permission", "class_name": "TestDocumentVersioningApi", "qualname": "TestDocumentVersioningApi.test_root_endpoint_returns_403_when_user_lacks_permission", "file_path": "src/documents/tests/test_api_document_versions.py", "repo_id": "paperless-ngx/paperless-ngx", "loc": 17, "tested_modules": ["__future__", "typing", "auditlog.models", "django.contrib.auth.models", "django.contrib.auth.models"], "has_docstring": false, "runnable_level": "project_runnable"}