Dataset Viewer
Auto-converted to Parquet
sample_id (string)
instruction (string)
completion (string)
category (string)
label (int64)
metadata (string)
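Each row pairs an instruction prompt with a completion and an integer label; the metadata column holds a JSON-encoded string. A minimal loading sketch using the datasets library — the dataset id "org/dataset-name" is a placeholder (the viewer does not show the real repository name), and the split name "train" is an assumption:

import json

from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository name.
ds = load_dataset("org/dataset-name", split="train")

for row in ds.select(range(3)):
    meta = json.loads(row["metadata"])  # metadata is stored as a JSON string
    print(row["sample_id"], row["category"], row["label"], meta.get("repo_id"))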
run-llama/llama_index:llama-index-core/llama_index/core/chat_engine/multi_modal_context.py:MultiModalContextChatEngine.from_defaults
# Context: from typing import Any, List, Optional, Sequence, Tuple, Union from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, MessageRole, ) from llama_index.core.llms import LLM, TextBlock, ChatMessage, ImageBlock from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts import PromptTemplate from llama_index.core.memory import BaseMemory, Memory from llama_index.core.settings import Settings from llama_index.core.base.base_retriever import BaseRetriever def _get_image_and_text_nodes(nodes: List[NodeWithScore]) -> Tuple[List[NodeWithScore], List[NodeWithScore]]: ... def _ensure_query_bundle(str_or_query_bundle: QueryType) -> QueryBundle: ... class MultiModalContextChatEngine(BaseChatEngine): def __init__( self, retriever: BaseRetriever, multi_modal_llm: LLM, memory: BaseMemory, system_prompt: str, context_template: Optional[Union[str, PromptTemplate]] = None, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ) -> None: self._retriever = retriever self._multi_modal_llm = multi_modal_llm context_template = context_template or DEFAULT_TEXT_QA_PROMPT if isinstance(context_template, str): context_template = PromptTemplate(context_template) self._context_template = context_template self._memory = memory self._system_prompt = system_prompt self._node_postprocessors = node_postprocessors or [] self.callback_manager = callback_manager or CallbackManager([]) for node_postprocessor in self._node_postprocessors: node_postprocessor.callback_manager = self.callback_manager def _apply_node_postprocessors(self, nodes: List[NodeWithScore], query_bundle: QueryBundle) -> List[NodeWithScore]: ... def _get_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]: ... async def _aget_nodes(self, query_bundle: QueryBundle) -> List[NodeWithScore]: ... def synthesize(self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]], streaming: bool) -> RESPONSE_TYPE: ... async def asynthesize(self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]], streaming: bool) -> RESPONSE_TYPE: ... def chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> AgentChatResponse: ... def stream_chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> StreamingAgentChatResponse: ... async def achat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> AgentChatResponse: ... async def astream_chat(self, message: str, chat_history: Optional[List[ChatMessage]], prev_chunks: Optional[List[NodeWithScore]]) -> StreamingAgentChatResponse: ... def reset(self) -> None: ... def chat_history(self) -> List[ChatMessage]: ... # Task: Write a Python method `from_defaults` for the class `MultiModalContextChatEngine` to initialize a MultiModalContextChatEngine from default parameters. Parameters: retriever: BaseRetriever, chat_history: Optional[List[ChatMessage]], memory: Optional[BaseMemory], system_prompt: Optional[str], node_postprocessors: Optional[List[BaseNodePostprocessor]], context_template: Optional[Union[str, PromptTemplate]], multi_modal_llm: Optional[LLM] Returns: 'MultiModalContextChatEngine'
def from_defaults(
    cls,
    retriever: BaseRetriever,
    chat_history: Optional[List[ChatMessage]] = None,
    memory: Optional[BaseMemory] = None,
    system_prompt: Optional[str] = None,
    node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
    context_template: Optional[Union[str, PromptTemplate]] = None,
    multi_modal_llm: Optional[LLM] = None,
    **kwargs: Any,
) -> "MultiModalContextChatEngine":
    """Initialize a MultiModalContextChatEngine from default parameters."""
    multi_modal_llm = multi_modal_llm or Settings.llm
    chat_history = chat_history or []
    memory = memory or Memory.from_defaults(
        chat_history=chat_history,
        token_limit=multi_modal_llm.metadata.context_window - 256,
    )
    system_prompt = system_prompt or ""
    node_postprocessors = node_postprocessors or []
    return cls(
        retriever,
        multi_modal_llm=multi_modal_llm,
        memory=memory,
        system_prompt=system_prompt,
        node_postprocessors=node_postprocessors,
        callback_manager=Settings.callback_manager,
        context_template=context_template,
    )
function_simple
1
{"cognitive_complexity": 5, "loc": 32, "code_loc": 17, "docstring_loc": 1, "function_name": "from_defaults", "class_name": "MultiModalContextChatEngine", "qualname": "MultiModalContextChatEngine.from_defaults", "file_path": "llama-index-core/llama_index/core/chat_engine/multi_modal_context.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py:license_header
Add an Apache-2.0 license header comment for the project 'transformers', authored by The Google AI Language Team Authors and The HuggingFace Inc, year 2018.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
license
0
{"license_type": "Apache-2.0", "author": "The Google AI Language Team Authors and The HuggingFace Inc", "year": "2018", "source": "header", "repo_id": "huggingface/transformers"}
ray-project/ray:python/ray/serve/tests/test_direct_ingress.py:test_get_serve_instance_details_json_serializable
# Context: import json import pytest import ray from ray import serve from ray.serve._private.constants import ( DEFAULT_AUTOSCALING_POLICY_NAME, HEALTHY_MESSAGE, RAY_SERVE_DIRECT_INGRESS_MAX_HTTP_PORT, RAY_SERVE_DIRECT_INGRESS_MIN_GRPC_PORT, RAY_SERVE_DIRECT_INGRESS_MIN_HTTP_PORT, RAY_SERVE_DIRECT_INGRESS_PORT_RETRY_COUNT, RAY_SERVE_ENABLE_DIRECT_INGRESS, RAY_SERVE_ENABLE_HA_PROXY, SERVE_DEFAULT_APP_NAME, ) from ray.serve.autoscaling_policy import default_autoscaling_policy from ray.serve.context import _get_global_client from ray.serve.tests.conftest import TEST_GRPC_SERVICER_FUNCTIONS class Collector: ... def _skip_if_ff_not_enabled(): ... def _skip_if_haproxy_enabled(): ... def _shared_serve_instance(): ... def serve_instance(_shared_serve_instance): ... class Hybrid: ... def get_target_groups(app_name: str, from_proxy_manager: bool): ... def test_proxy_is_started_on_head_only_mode(_skip_if_ff_not_enabled, serve_instance): ... def get_http_ports(route_prefix, first_only): ... def get_grpc_ports(route_prefix, first_only): ... def test_basic(_skip_if_ff_not_enabled, serve_instance): ... def test_internal_server_error(_skip_if_ff_not_enabled, serve_instance): ... def test_fastapi_app(_skip_if_ff_not_enabled, serve_instance): ... def test_http_request_id(_skip_if_ff_not_enabled, serve_instance, use_fastapi: bool): ... def test_grpc_request_id(_skip_if_ff_not_enabled, serve_instance): ... def test_multiplexed_model_id(_skip_if_ff_not_enabled, serve_instance): ... def test_health_check(_skip_if_ff_not_enabled, serve_instance): ... def test_port_retry_logic(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_gives_up_after_max_port_retries_for_http(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_gives_up_after_max_port_retries_for_grpc(_skip_if_ff_not_enabled, serve_instance): ... def test_no_port_available(_skip_if_ff_not_enabled, serve_instance): ... def test_replica_releases_ports_on_shutdown(_skip_if_ff_not_enabled, serve_instance): ... def test_get_serve_instance_details(_skip_if_ff_not_enabled, serve_instance): ... def test_only_ingress_deployment_replicas_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ... def test_crashed_replica_port_is_released_and_reused(_skip_if_ff_not_enabled, serve_instance): ... def test_multiple_applications_on_same_node(_skip_if_ff_not_enabled, serve_instance): ... def test_app_with_composite_deployments(_skip_if_ff_not_enabled, serve_instance): ... def test_only_running_apps_are_used_for_target_groups(_skip_if_ff_not_enabled, serve_instance): ... def test_some_replicas_not_running(_skip_if_ff_not_enabled, serve_instance): ... def test_port_recovery_on_controller_restart(_skip_if_ff_not_enabled, serve_instance): ... class TestDirectIngressBackpressure: ... class TestDirectIngressAutoscaling: ... def test_disconnect(_skip_if_ff_not_enabled, serve_instance): ... def test_context_propagation(_skip_if_ff_not_enabled, serve_instance): ... def test_context_propagation_with_child(_skip_if_ff_not_enabled, serve_instance): ... def test_shutdown_replica_only_after_draining_requests(_skip_if_ff_not_enabled, serve_instance): ... def test_http_routes_endpoint(_skip_if_ff_not_enabled, serve_instance): ... def test_grpc_list_applications_endpoint(_skip_if_ff_not_enabled, _skip_if_haproxy_enabled, serve_instance): ... def test_redeploy_start_time(_skip_if_ff_not_enabled, serve_instance): ... def test_deploy_app_custom_exception(_skip_if_ff_not_enabled, serve_instance): ... 
def test_get_deployment_config(_skip_if_ff_not_enabled, serve_instance): ... def test_stuck_requests_are_force_killed(_skip_if_ff_not_enabled, serve_instance): ... # Task: Write a Python test function `test_get_serve_instance_details_json_serializable` to test that the result from get_serve_instance_details is JSON serializable. Module under test: concurrent.futures, typing, uuid
def test_get_serve_instance_details_json_serializable(
    _skip_if_ff_not_enabled, serve_instance, policy_name
):
    """Test the result from get_serve_instance_details is json serializable."""
    controller = _get_global_client()._controller
    autoscaling_config = {
        "min_replicas": 1,
        "max_replicas": 10,
        "_policy": {"name": policy_name},
    }
    if policy_name is None:
        autoscaling_config.pop("_policy")

    @serve.deployment(autoscaling_config=autoscaling_config)
    def autoscaling_app():
        return "1"

    serve.run(autoscaling_app.bind())

    details = ray.get(controller.get_serve_instance_details.remote())
    details_json = json.dumps(details)

    controller_details = ray.get(controller.get_actor_details.remote())
    node_id = controller_details.node_id
    node_ip = controller_details.node_ip
    node_instance_id = controller_details.node_instance_id
    proxy_details = ray.get(controller.get_proxy_details.remote(node_id=node_id))
    deployment_timestamp = ray.get(
        controller.get_deployment_timestamps.remote(app_name="default")
    )
    deployment_details = ray.get(
        controller.get_deployment_details.remote("default", "autoscaling_app")
    )
    replica = deployment_details.replicas[0]
    expected_json = json.dumps(
        {
            "controller_info": {
                "node_id": node_id,
                "node_ip": node_ip,
                "node_instance_id": node_instance_id,
                "actor_id": controller_details.actor_id,
                "actor_name": controller_details.actor_name,
                "worker_id": controller_details.worker_id,
                "log_file_path": controller_details.log_file_path,
            },
            "proxy_location": "HeadOnly",
            "http_options": {"host": "0.0.0.0"},
            "grpc_options": {
                "port": 9000,
                "grpc_servicer_functions": TEST_GRPC_SERVICER_FUNCTIONS,
            },
            "proxies": {
                node_id: {
                    "node_id": node_id,
                    "node_ip": node_ip,
                    "node_instance_id": node_instance_id,
                    "actor_id": proxy_details.actor_id,
                    "actor_name": proxy_details.actor_name,
                    "worker_id": proxy_details.worker_id,
                    "log_file_path": proxy_details.log_file_path,
                    "status": proxy_details.status,
                }
            },
            "applications": {
                "default": {
                    "name": "default",
                    "route_prefix": "/",
                    "docs_path": None,
                    "status": "RUNNING",
                    "message": "",
                    "last_deployed_time_s": deployment_timestamp,
                    "deployed_app_config": None,
                    "source": "imperative",
                    "deployments": {
                        "autoscaling_app": {
                            "name": "autoscaling_app",
                            "status": "HEALTHY",
                            "status_trigger": "CONFIG_UPDATE_COMPLETED",
                            "message": "",
                            "deployment_config": {
                                "name": "autoscaling_app",
                                "max_ongoing_requests": 5,
                                "max_queued_requests": -1,
                                "user_config": None,
                                "autoscaling_config": {
                                    "min_replicas": 1,
                                    "initial_replicas": None,
                                    "max_replicas": 10,
                                    "target_ongoing_requests": 2.0,
                                    "metrics_interval_s": 10.0,
                                    "look_back_period_s": 30.0,
                                    "smoothing_factor": 1.0,
                                    "upscale_smoothing_factor": None,
                                    "downscale_smoothing_factor": None,
                                    "upscaling_factor": None,
                                    "downscaling_factor": None,
                                    "downscale_delay_s": 600.0,
                                    "downscale_to_zero_delay_s": None,
                                    "upscale_delay_s": 30.0,
                                    "aggregation_function": "mean",
                                    "policy": {
                                        "policy_function": "ray.serve.autoscaling_policy:default_autoscaling_policy",
                                        "policy_kwargs": {},
                                    },
                                },
                                "graceful_shutdown_wait_loop_s": 2.0,
                                "graceful_shutdown_timeout_s": 20.0,
                                "health_check_period_s": 10.0,
                                "health_check_timeout_s": 30.0,
                                "ray_actor_options": {
                                    "num_cpus": 1.0,
                                },
                                "request_router_config": {
                                    "request_router_class": "ray.serve._private.request_router:PowerOfTwoChoicesRequestRouter",
                                    "request_router_kwargs": {},
                                    "request_routing_stats_period_s": 10.0,
                                    "request_routing_stats_timeout_s": 30.0,
                                },
                            },
                            "target_num_replicas": 1,
                            "required_resources": {"CPU": 1},
                            "replicas": [
                                {
                                    "node_id": node_id,
                                    "node_ip": node_ip,
                                    "node_instance_id": node_instance_id,
                                    "actor_id": replica.actor_id,
                                    "actor_name": replica.actor_name,
                                    "worker_id": replica.worker_id,
                                    "log_file_path": replica.log_file_path,
                                    "replica_id": replica.replica_id,
                                    "state": "RUNNING",
                                    "pid": replica.pid,
                                    "start_time_s": replica.start_time_s,
                                }
                            ],
                        }
                    },
                    "external_scaler_enabled": False,
                    "deployment_topology": {
                        "app_name": "default",
                        "nodes": {
                            "autoscaling_app": {
                                "name": "autoscaling_app",
                                "app_name": "default",
                                "outbound_deployments": [],
                                "is_ingress": True,
                            },
                        },
                        "ingress_deployment": "autoscaling_app",
                    },
                }
            },
            "target_capacity": None,
            "target_groups": [
                {
                    "targets": [
                        {
                            "ip": node_ip,
                            "port": 8000 if RAY_SERVE_ENABLE_HA_PROXY else 30000,
                            "instance_id": node_instance_id,
                            "name": proxy_details.actor_name
                            if RAY_SERVE_ENABLE_HA_PROXY
                            else replica.actor_name,
                        },
                    ],
                    "route_prefix": "/",
                    "protocol": "HTTP",
                    "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default",
                },
                {
                    "targets": [
                        {
                            "ip": node_ip,
                            "port": 9000 if RAY_SERVE_ENABLE_HA_PROXY else 40000,
                            "instance_id": node_instance_id,
                            "name": proxy_details.actor_name
                            if RAY_SERVE_ENABLE_HA_PROXY
                            else replica.actor_name,
                        },
                    ],
                    "route_prefix": "/",
                    "protocol": "gRPC",
                    "app_name": "" if RAY_SERVE_ENABLE_HA_PROXY else "default",
                },
            ],
        }
    )
    assert details_json == expected_json

    # ensure internal field, serialized_policy_def, is not exposed
    application = details["applications"]["default"]
    deployment = application["deployments"]["autoscaling_app"]
    autoscaling_config = deployment["deployment_config"]["autoscaling_config"]
    assert "_serialized_policy_def" not in autoscaling_config
test
0
{"function_name": "test_get_serve_instance_details_json_serializable", "class_name": null, "qualname": "test_get_serve_instance_details_json_serializable", "file_path": "python/ray/serve/tests/test_direct_ingress.py", "repo_id": "ray-project/ray", "loc": 196, "tested_modules": ["concurrent.futures", "typing", "uuid", "fastapi", "starlette.requests"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/train/lint/check_circular_imports.py:expand_to_include_reexports
# Context: from typing import Dict, List, Optional, Set, Tuple def find_train_packages(base_train_dir: Path, patch_train_dir: Path) -> None: ... def is_train_package(module_str: str) -> bool: ... def get_base_dir() -> Path: ... def get_base_train_dir() -> Path: ... def does_overlap(main_module: str, module: str) -> bool: ... class Import: ... class ImportCollector(ast.NodeVisitor): ... def collect_imports(module_name: str, is_package: bool, source_text: str) -> Set[Import]: ... def to_module_name_and_is_package(py_file: Path) -> Tuple[str, bool]: ... def get_file_module_imports(files: List[Path], module_match_string: Optional[str]) -> Dict[str, List[Import]]: ... def convert_to_file_paths(imports: List[Import]) -> List[Path]: ... def check_violations(base_train_patching_imports: Dict[str, List[Import]], patch_dir: Path) -> List[str]: ... def main(): ... # Task: Write a Python function `expand_to_include_reexports` to expand the set of imports for a given import map to include the modules resulting from reexports. Parameters: import_map: Dict[str, List[Import]] Returns: None
def expand_to_include_reexports(import_map: Dict[str, List[Import]]) -> None:
    """
    Expands the set of imports for a given import map to include the modules
    resulting from reexports. So if in the base train module, there is
    "from x import a, b" and x is a package, then this function will explore
    the __init__.py of x and include the modules a and b were reexported from
    in the import map.
    """
    for module, base_imports in import_map.items():
        # Get only the package imports
        packages = [imp for imp in base_imports if imp.is_package]
        package_files = convert_to_file_paths(packages)
        reexports = get_file_module_imports(package_files)

        agg_reexports = []
        # Filter patch init file imports to those that only contain the right names
        for base_import in base_imports:
            if base_import.module in reexports:
                import_list = reexports[base_import.module]
                target_reexports = [
                    imp for imp in import_list if set(imp.names) & set(base_import.names)
                ]
                agg_reexports.extend(target_reexports)

        # Expand modules to include reexported modules
        import_map[module].extend(agg_reexports)
function_complex
0
{"cognitive_complexity": 6, "loc": 26, "code_loc": 15, "docstring_loc": 5, "function_name": "expand_to_include_reexports", "class_name": null, "qualname": "expand_to_include_reexports", "file_path": "python/ray/train/lint/check_circular_imports.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
Comfy-Org/ComfyUI:tests-unit/prompt_server_test/system_user_endpoint_test.py:TestSystemUserEndpointBlocking.test_userdata_post_blocks_system_user
# Context: import pytest from unittest.mock import patch def mock_user_directory(tmp_path): ... def user_manager_multi_user(mock_user_directory): ... def app_multi_user(user_manager_multi_user): ... class TestSystemUserCreationBlocking: ... class TestPublicUserStillWorks: ... class TestCustomNodeScenario: ... class TestStructuralSecurity: ... class TestSystemUserEndpointBlocking: async def test_userdata_get_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_userdata_delete_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_v2_userdata_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... async def test_move_userdata_blocks_system_user(self, aiohttp_client, app_multi_user, mock_user_directory): ... # Task: Write a Python test method `test_userdata_post_blocks_system_user` in test class `TestSystemUserEndpointBlocking` to verify that POST /userdata with the System User header is blocked. Module under test: aiohttp, app.user_manager
async def test_userdata_post_blocks_system_user(
    self, aiohttp_client, app_multi_user, mock_user_directory
):
    """
    POST /userdata with System User header should be blocked.
    """
    client = await aiohttp_client(app_multi_user)

    with patch('app.user_manager.args') as mock_args:
        mock_args.multi_user = True

        resp = await client.post(
            "/userdata/test.txt",
            headers={"comfy-user": "__system"},
            data=b"malicious content"
        )

        assert resp.status in [400, 403, 500], \
            f"System User write should be blocked, got {resp.status}"

        # Verify no file was created
        assert not (mock_user_directory / "__system" / "test.txt").exists()
test
1
{"function_name": "test_userdata_post_blocks_system_user", "class_name": "TestSystemUserEndpointBlocking", "qualname": "TestSystemUserEndpointBlocking.test_userdata_post_blocks_system_user", "file_path": "tests-unit/prompt_server_test/system_user_endpoint_test.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 21, "tested_modules": ["aiohttp", "app.user_manager"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py:BaseDataLoader.__init__
# Context: from typing import Dict, List, Optional from dataset_benchmark_util import IMAGENET_WNID_TO_ID class BenchmarkConfig: ... class S3ParquetDataLoader(BaseDataLoader): ... class S3UrlImageDataLoader(BaseDataLoader): ... class S3ReadImagesDataLoader(BaseDataLoader): ... def create_data_loader(data_loader: str, split: str) -> BaseDataLoader: ... def benchmark_iteration(dataset: ray.data.Dataset, batch_size: int, prefetch_batches: int, num_batches: int, simulated_training_time: float, device: str, pin_memory: bool) -> Dict[str, float]: ... def run_benchmark(config: BenchmarkConfig) -> List[Dict]: ... def print_summary(results: List[Dict]): ... def main(): ... class BaseDataLoader(ABC): TRANSFORM_CONFIGS = { def get_transform(cls, transform_type: str) -> transforms.Compose: ... def tensor_to_numpy(tensor) -> np.ndarray: ... def add_image_columns(result: Dict, processed_image: np.ndarray, num_columns: int): ... def make_split_dirs(s3_root: str) -> Dict[str, str]: ... def compute_limit(batch_size: int, num_batches: int) -> int: ... def create_dataset(self, transform_type: str, batch_size: int, num_batches: int, num_image_columns: int) -> ray.data.Dataset: ... # Task: Write a Python method `__init__` for the class `BaseDataLoader` to initialize the data loader. Parameters: data_dir: str, label_to_id_map: Dict[str, int]
def __init__(self, data_dir: str, label_to_id_map: Dict[str, int] = None):
    """Initialize the data loader.

    Args:
        data_dir: Path to data directory
        label_to_id_map: Mapping from label strings to integer IDs
    """
    self.data_dir = data_dir
    self.label_to_id_map = label_to_id_map or IMAGENET_WNID_TO_ID
function_simple
0
{"cognitive_complexity": 1, "loc": 9, "code_loc": 2, "docstring_loc": 6, "function_name": "__init__", "class_name": "BaseDataLoader", "qualname": "BaseDataLoader.__init__", "file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestBuildRichFieldDescription.test_min_max_length
# Context: from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestRequiredOptional: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestBuildRichFieldDescription: def test_description_only(self) -> None: ... def test_empty_schema(self) -> None: ... def test_format(self) -> None: ... def test_enum(self) -> None: ... def test_pattern(self) -> None: ... def test_min_max(self) -> None: ... def test_examples(self) -> None: ... def test_combined_constraints(self) -> None: ... # Task: Write a Python test method `test_min_max_length` in test class `TestBuildRichFieldDescription` to verify the behavior of `min_max_length`. Module under test: __future__, copy, typing
def test_min_max_length(self) -> None:
    desc = build_rich_field_description({"minLength": 1, "maxLength": 255})
    assert "Min length: 1" in desc
    assert "Max length: 255" in desc
test
0
{"function_name": "test_min_max_length", "class_name": "TestBuildRichFieldDescription", "qualname": "TestBuildRichFieldDescription.test_min_max_length", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/dashboard/tests/test_dashboard_auth.py:test_dashboard_request_requires_auth_invalid_token
# Context: import requests def test_dashboard_request_requires_auth_with_valid_token(setup_cluster_with_token_auth): ... def test_dashboard_request_requires_auth_missing_token(setup_cluster_with_token_auth): ... def test_dashboard_request_with_ray_auth_header(setup_cluster_with_token_auth): ... def test_authorization_header_takes_precedence(setup_cluster_with_token_auth): ... def test_dashboard_auth_disabled(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_with_token_auth(setup_cluster_with_token_auth): ... def test_authentication_mode_endpoint_without_auth(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_is_public(setup_cluster_with_token_auth): ... # Task: Write a Python test function `test_dashboard_request_requires_auth_invalid_token` to test that requests fail with invalid token when auth is enabled.
def test_dashboard_request_requires_auth_invalid_token(setup_cluster_with_token_auth):
    """Test that requests fail with invalid token when auth is enabled."""
    cluster_info = setup_cluster_with_token_auth
    headers = {"Authorization": "Bearer wrong_token_00000000000000000000000000000000"}
    response = requests.get(
        f"{cluster_info['dashboard_url']}/api/component_activities",
        json={"test": "data"},
        headers=headers,
    )
    assert response.status_code == 403
test
0
{"function_name": "test_dashboard_request_requires_auth_invalid_token", "class_name": null, "qualname": "test_dashboard_request_requires_auth_invalid_token", "file_path": "python/ray/dashboard/tests/test_dashboard_auth.py", "repo_id": "ray-project/ray", "loc": 13, "tested_modules": [], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py:_dmat_from_arrow
# Context: import numpy as np import xgboost as xgb import pyarrow as pa def _arrow_table_from_shard(name: str) -> pa.Table: ... def train_func(config): ... class XGBPredictor: ... # Task: Write a Python function `_dmat_from_arrow` to build XGBoost DMatrix from pyarrow.Table with explicit feature_names. Parameters: table: pa.Table, feature_cols, label_col: str
def _dmat_from_arrow(table: pa.Table, feature_cols, label_col: str):
    """Build XGBoost DMatrix from pyarrow.Table with explicit feature_names."""
    X = np.column_stack([table[c].to_numpy(zero_copy_only=False) for c in feature_cols])
    y = table[label_col].to_numpy(zero_copy_only=False)
    return xgb.DMatrix(X, label=y, feature_names=feature_cols)
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "_dmat_from_arrow", "class_name": null, "qualname": "_dmat_from_arrow", "file_path": "doc/source/train/tutorials/ci/py_scripts/04b_tabular_workload_pattern.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py:test_hunyuan_a13b_tool_parser_streaming
# Context: from unittest.mock import MagicMock import pytest from tests.entrypoints.openai.tool_parsers.utils import ( run_tool_extraction, run_tool_extraction_streaming, ) from vllm.tool_parsers import ToolParser, ToolParserManager def make_tool_call(name, arguments): ... def test_hunyuan_a13b_tool_parser_extract(model_output, expected_tool_calls, expected_content): ... # Task: Write a Python test function `test_hunyuan_a13b_tool_parser_streaming` to verify the behavior of `hunyuan_a13b_tool_parser_streaming`. Module under test: tests.entrypoints.openai.tool_parsers.utils, vllm.entrypoints.openai.engine.protocol, vllm.tool_parsers
def test_hunyuan_a13b_tool_parser_streaming(model_deltas, expected_tool_calls):
    mock_tokenizer = MagicMock()
    tool_parser: ToolParser = ToolParserManager.get_tool_parser("hunyuan_a13b")(
        mock_tokenizer
    )
    reconstructor = run_tool_extraction_streaming(
        tool_parser, model_deltas, assert_one_tool_per_delta=False
    )

    # align the random id.
    for idx in range(len(reconstructor.tool_calls)):
        reconstructor.tool_calls[idx].id = expected_tool_calls[idx].id

    assert reconstructor.tool_calls == expected_tool_calls
test
1
{"function_name": "test_hunyuan_a13b_tool_parser_streaming", "class_name": null, "qualname": "test_hunyuan_a13b_tool_parser_streaming", "file_path": "tests/entrypoints/openai/tool_parsers/test_hunyuan_a13b_tool_parser.py", "repo_id": "vllm-project/vllm", "loc": 15, "tested_modules": ["tests.entrypoints.openai.tool_parsers.utils", "vllm.entrypoints.openai.engine.protocol", "vllm.tool_parsers"], "has_docstring": false, "runnable_level": "project_runnable"}
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/scheduler_tool.py:module_doc
Write a module-level docstring for the Python module `scheduler_tool` which contains class `SchedulerTool`.
Scheduler tool for creating and managing scheduled tasks
documentation
1
{"doc_type": "module", "module_name": "scheduler_tool", "file_path": "agent/tools/scheduler/scheduler_tool.py", "repo_id": "zhayujie/chatgpt-on-wechat", "char_length": 56}
infiniflow/ragflow:test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py:TestChatAssistantsDelete.test_repeated_deletion
# Context: import pytest class TestChatAssistantsDelete: def test_basic_scenarios(self, client, add_chat_assistants_func, payload, expected_message, remaining): ... def test_delete_chats_nonzero_response_raises(self, client, monkeypatch): ... def test_delete_partial_invalid_id(self, client, add_chat_assistants_func, payload): ... def test_duplicate_deletion(self, client, add_chat_assistants_func): ... def test_concurrent_deletion(self, client): ... def test_delete_1k(self, client): ... # Task: Write a Python test method `test_repeated_deletion` in test class `TestChatAssistantsDelete` to verify the behavior of `repeated_deletion`. Module under test: concurrent.futures, common
def test_repeated_deletion(self, client, add_chat_assistants_func):
    _, _, chat_assistants = add_chat_assistants_func
    chat_ids = [chat.id for chat in chat_assistants]

    client.delete_chats(ids=chat_ids)

    with pytest.raises(Exception) as exception_info:
        client.delete_chats(ids=chat_ids)
    assert "not found" in str(exception_info.value)
test
1
{"function_name": "test_repeated_deletion", "class_name": "TestChatAssistantsDelete", "qualname": "TestChatAssistantsDelete.test_repeated_deletion", "file_path": "test/testcases/test_sdk_api/test_chat_assistant_management/test_delete_chat_assistants.py", "repo_id": "infiniflow/ragflow", "loc": 8, "tested_modules": ["concurrent.futures", "common"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py:TestKeycloakProvider.test_get_token_url_with_different_domain
# Context: from crewai.cli.authentication.main import Oauth2Settings from crewai.cli.authentication.providers.keycloak import KeycloakProvider class TestKeycloakProvider: def setup_method(self): ... def test_initialization_with_valid_settings(self): ... def test_get_authorize_url(self): ... def test_get_authorize_url_with_different_domain(self): ... def test_get_token_url(self): ... def test_get_jwks_url(self): ... def test_get_jwks_url_with_different_domain(self): ... def test_get_issuer(self): ... def test_get_issuer_with_different_domain(self): ... def test_get_audience(self): ... def test_get_client_id(self): ... def test_get_required_fields(self): ... def test_oauth2_base_url(self): ... def test_oauth2_base_url_strips_https_prefix(self): ... def test_oauth2_base_url_strips_http_prefix(self): ... # Task: Write a Python test method `test_get_token_url_with_different_domain` in test class `TestKeycloakProvider` to verify the behavior of `get_token_url_with_different_domain`. Module under test: crewai.cli.authentication.main, crewai.cli.authentication.providers.keycloak
def test_get_token_url_with_different_domain(self):
    settings = Oauth2Settings(
        provider="keycloak",
        domain="sso.enterprise.com",
        client_id="test-client",
        audience="test-audience",
        extra={
            "realm": "enterprise-realm"
        }
    )
    provider = KeycloakProvider(settings)

    expected_url = "https://sso.enterprise.com/realms/enterprise-realm/protocol/openid-connect/token"
    assert provider.get_token_url() == expected_url
test
0
{"function_name": "test_get_token_url_with_different_domain", "class_name": "TestKeycloakProvider", "qualname": "TestKeycloakProvider.test_get_token_url_with_different_domain", "file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py", "repo_id": "crewAIInc/crewAI", "loc": 13, "tested_modules": ["crewai.cli.authentication.main", "crewai.cli.authentication.providers.keycloak"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestRequiredOptional.test_required_field_has_no_default
# Context: import pytest from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestBuildRichFieldDescription: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestRequiredOptional: def test_optional_field_defaults_to_none(self) -> None: ... def test_mixed_required_optional(self) -> None: ... # Task: Write a Python test method `test_required_field_has_no_default` in test class `TestRequiredOptional` to verify the behavior of `required_field_has_no_default`. Module under test: __future__, copy, typing
def test_required_field_has_no_default(self) -> None:
    schema = {
        "type": "object",
        "properties": {"name": {"type": "string"}},
        "required": ["name"],
    }
    Model = create_model_from_schema(schema)
    with pytest.raises(Exception):
        Model()
test
0
{"function_name": "test_required_field_has_no_default", "class_name": "TestRequiredOptional", "qualname": "TestRequiredOptional.test_required_field_has_no_default", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py:TestHumanFeedbackLearn.test_learn_true_empty_feedback_does_not_store
# Context: from unittest.mock import MagicMock, patch from crewai.flow import Flow, human_feedback, listen, start class TestHumanFeedbackValidation: ... class TestHumanFeedbackConfig: ... class TestHumanFeedbackResult: ... class TestDecoratorAttributePreservation: ... class TestAsyncSupport: ... class TestHumanFeedbackExecution: ... class TestHumanFeedbackHistory: ... class TestCollapseToOutcome: ... class TestHumanFeedbackLearn: def test_learn_false_does_not_interact_with_memory(self): ... def test_learn_true_stores_distilled_lessons(self): ... def test_learn_true_pre_reviews_with_past_lessons(self): ... def test_learn_true_uses_default_llm(self): ... # Task: Write a Python test method `test_learn_true_empty_feedback_does_not_store` in test class `TestHumanFeedbackLearn` to verify that when learn=True but feedback is empty, no lessons are stored. Module under test: __future__, datetime, typing
def test_learn_true_empty_feedback_does_not_store(self):
    """When learn=True but feedback is empty, no lessons are stored."""

    class LearnFlow(Flow):
        @start()
        @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
        def produce(self):
            return "output"

    flow = LearnFlow()
    flow.memory = MagicMock()
    flow.memory.recall.return_value = []

    with patch.object(
        flow, "_request_human_feedback", return_value=""
    ):
        flow.produce()

    # Empty feedback -> no distillation, no storage
    flow.memory.remember_many.assert_not_called()
test
0
{"function_name": "test_learn_true_empty_feedback_does_not_store", "class_name": "TestHumanFeedbackLearn", "qualname": "TestHumanFeedbackLearn.test_learn_true_empty_feedback_does_not_store", "file_path": "lib/crewai/tests/test_human_feedback_decorator.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.human_feedback"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:tests/ci/test_cli_headed_flag.py:test_headed_flag_with_session
# Context: from browser_use.skill_cli.main import build_parser def test_headed_flag_before_open_subcommand(): ... def test_headed_flag_default_is_false(): ... def test_headed_flag_with_browser_mode(): ... # Task: Write a Python test function `test_headed_flag_with_session` to test that --headed works with other global flags like -s/--session. Module under test: browser_use.skill_cli.main
def test_headed_flag_with_session():
    """Test that --headed works with other global flags like -s/--session."""
    parser = build_parser()
    args = parser.parse_args(['--headed', '-s', 'mysession', 'open', 'http://example.com'])
    assert args.headed is True
    assert args.session == 'mysession'
    assert args.url == 'http://example.com'
test
0
{"function_name": "test_headed_flag_with_session", "class_name": null, "qualname": "test_headed_flag_with_session", "file_path": "tests/ci/test_cli_headed_flag.py", "repo_id": "browser-use/browser-use", "loc": 8, "tested_modules": ["browser_use.skill_cli.main"], "has_docstring": true, "runnable_level": "project_runnable"}
xtekky/gpt4free:g4f/Provider/qwen/cookie_generator.py:refresh_cookies
# Context: from g4f import debug import asyncio def lzw_compress(data: Optional[str], bits: int, char_func: Callable[[int], str]) -> str: ... def custom_encode(data: Optional[str], url_safe: bool) -> str: ... def random_hash() -> int: ... def generate_device_id() -> str: ... def parse_real_data(real_data: str) -> List[str]: ... def process_fields(fields: List[str]) -> List[Union[str, int]]: ... def generate_cookies(real_data: Optional[str], fingerprint_options: Optional[Dict[str, Any]]) -> Dict[str, Any]: ... def generate_batch(count: int, real_data: Optional[str], fingerprint_options: Optional[Dict[str, Any]]) -> List[Dict[str, Any]]: ... async def _refresh_loop() -> None: ... def init_ssxmod_manager() -> None: ... async def stop_refresh() -> None: ... async def get_ssxmod_itna() -> str: ... async def get_ssxmod_itna2() -> str: ... async def get_cookies() -> Dict[str, Any]: ... # Task: Write a Python async function `refresh_cookies` to refresh SSXMOD cookies (async wrapper).
async def refresh_cookies():
    """Refresh SSXMOD cookies (async wrapper)."""
    global _current_cookies
    try:
        # generate_cookies() is CPU-bound sync; run it off the event loop.
        result = await asyncio.to_thread(generate_cookies)
        async with _lock:
            _current_cookies = {
                "ssxmod_itna": result["ssxmod_itna"],
                "ssxmod_itna2": result["ssxmod_itna2"],
                "timestamp": result["timestamp"],
            }
        debug.log("SSXMOD cookie refreshed", "SSXMOD")
    except Exception as e:
        debug.error("SSXMOD cookie refresh failed", "SSXMOD", "", str(e))
    return _current_cookies
function_simple
1
{"cognitive_complexity": 1, "loc": 18, "code_loc": 13, "docstring_loc": 1, "function_name": "refresh_cookies", "class_name": null, "qualname": "refresh_cookies", "file_path": "g4f/Provider/qwen/cookie_generator.py", "repo_id": "xtekky/gpt4free", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py:InvokeCrewAIAutomationTool:class_doc
Write a class-level docstring for `InvokeCrewAIAutomationTool` (inherits from BaseTool) which has methods: `__init__`, `_kickoff_crew`, `_get_crew_status`, `_run`.
A CrewAI tool for invoking external crew/flows APIs.

This tool provides CrewAI Platform API integration with external crew services, supporting:
- Dynamic input schema configuration
- Automatic polling for task completion
- Bearer token authentication
- Comprehensive error handling

Example:
    Basic usage:
        >>> tool = InvokeCrewAIAutomationTool(
        ...     crew_api_url="https://api.example.com",
        ...     crew_bearer_token="your_token",
        ...     crew_name="My Crew",
        ...     crew_description="Description of what the crew does",
        ... )

    With custom inputs:
        >>> custom_inputs = {
        ...     "param1": Field(..., description="Description of param1"),
        ...     "param2": Field(
        ...         default="default_value", description="Description of param2"
        ...     ),
        ... }
        >>> tool = InvokeCrewAIAutomationTool(
        ...     crew_api_url="https://api.example.com",
        ...     crew_bearer_token="your_token",
        ...     crew_name="My Crew",
        ...     crew_description="Description of what the crew does",
        ...     crew_inputs=custom_inputs,
        ... )

Example:
    >>> tools = [
    ...     InvokeCrewAIAutomationTool(
    ...         crew_api_url="https://canary-crew-[...].crewai.com",
    ...         crew_bearer_token="[Your token: abcdef012345]",
    ...         crew_name="State of AI Report",
    ...         crew_description="Retrieves a report on state of AI for a given year.",
    ...         crew_inputs={
    ...             "year": Field(
    ...                 ..., description="Year to retrieve the report for (integer)"
    ...             )
    ...         },
    ...     )
    ... ]
documentation
0
{"doc_type": "class", "class_name": "InvokeCrewAIAutomationTool", "file_path": "lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py", "repo_id": "crewAIInc/crewAI", "char_length": 1650, "methods": ["__init__", "_kickoff_crew", "_get_crew_status", "_run"]}
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/config.py:A2AClientConfig:class_doc
Write a class-level docstring for `A2AClientConfig` (inherits from BaseModel) which has methods: `_migrate_deprecated_transport_fields`.
Configuration for connecting to remote A2A agents.

Attributes:
    endpoint: A2A agent endpoint URL.
    auth: Authentication scheme.
    timeout: Request timeout in seconds.
    max_turns: Maximum conversation turns with A2A agent.
    response_model: Optional Pydantic model for structured A2A agent responses.
    fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
    trust_remote_completion_status: If True, return A2A agent's result directly when completed.
    updates: Update mechanism config.
    accepted_output_modes: Media types the client can accept in responses.
    extensions: Extension URIs the client supports (A2A protocol extensions).
    client_extensions: Client-side processing hooks for tool injection and prompt augmentation.
    transport: Transport configuration (preferred, supported transports, gRPC settings).
documentation
0
{"doc_type": "class", "class_name": "A2AClientConfig", "file_path": "lib/crewai/src/crewai/a2a/config.py", "repo_id": "crewAIInc/crewAI", "char_length": 874, "methods": ["_migrate_deprecated_transport_fields"]}
ray-project/ray:python/ray/llm/tests/common/cloud/test_utils.py:TestRemoteObjectCacheDecorator.test_expiration
# Context: import asyncio import pytest from ray.llm._internal.common.utils.cloud_utils import ( CloudObjectCache, is_remote_path, remote_object_cache, ) class MockSyncFetcher: ... class MockAsyncFetcher: ... class TestCloudObjectCache: ... class TestIsRemotePath: ... class TestRemoteObjectCacheDecorator: async def test_basic_functionality(self): ... async def test_error_handling(self): ... async def test_concurrent_access(self): ... # Task: Write a Python test method `test_expiration` in test class `TestRemoteObjectCacheDecorator` to test cache expiration for both missing and existing objects. Module under test: ray.llm._internal.common.utils.cloud_utils
async def test_expiration(self):
    """Test cache expiration for both missing and existing objects."""
    call_count = 0
    MISSING = object()

    @remote_object_cache(
        max_size=2,
        missing_expire_seconds=1,  # 1 second to expire missing object
        exists_expire_seconds=3,  # 3 seconds to expire existing object
        missing_object_value=MISSING,
    )
    async def fetch(key: str):
        nonlocal call_count
        call_count += 1
        if key == "missing":
            return MISSING
        return f"value-{key}"

    # Test missing object expiration
    assert await fetch("missing") is MISSING
    assert call_count == 1
    assert await fetch("missing") is MISSING  # Should hit cache
    assert call_count == 1

    await asyncio.sleep(1.5)  # Wait for missing object to expire
    assert await fetch("missing") is MISSING  # Should fetch again
    assert call_count == 2

    # Test existing object expiration
    assert await fetch("key1") == "value-key1"
    assert call_count == 3
    assert await fetch("key1") == "value-key1"  # Should hit cache
    assert call_count == 3

    await asyncio.sleep(1.5)  # Not expired yet
    assert await fetch("key1") == "value-key1"  # Should still hit cache
    assert call_count == 3

    await asyncio.sleep(2)  # Now expired (total > 3 seconds)
    assert await fetch("key1") == "value-key1"  # Should fetch again
    assert call_count == 4
test
0
{"function_name": "test_expiration", "class_name": "TestRemoteObjectCacheDecorator", "qualname": "TestRemoteObjectCacheDecorator.test_expiration", "file_path": "python/ray/llm/tests/common/cloud/test_utils.py", "repo_id": "ray-project/ray", "loc": 41, "tested_modules": ["ray.llm._internal.common.utils.cloud_utils"], "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/diffusers:src/diffusers/pipelines/qwenimage/pipeline_qwenimage_controlnet.py:license_header
Add an Apache-2.0 license header comment for the project 'diffusers', authored by Qwen-Image Team, InstantX Team and The HuggingFace Team, year 2025.
# Copyright 2025 Qwen-Image Team, InstantX Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
license
1
{"license_type": "Apache-2.0", "author": "Qwen-Image Team, InstantX Team and The HuggingFace Team", "year": "2025", "source": "header", "repo_id": "huggingface/diffusers"}
browser-use/browser-use:browser_use/skills/service.py:module_doc
Write a module-level docstring for the Python module `service` which contains class `SkillService`.
Skills service for fetching and executing skills from the Browser Use API
documentation
0
{"doc_type": "module", "module_name": "service", "file_path": "browser_use/skills/service.py", "repo_id": "browser-use/browser-use", "char_length": 73}
infiniflow/ragflow:test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py:TestChunksRetrieval.test_keyword
# Context: import pytest from common import retrieval_chunks class TestAuthorization: ... class TestChunksRetrieval: def test_basic_scenarios(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_page(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_page_size(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_vector_similarity_weight(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_top_k(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message): ... def test_rerank_id(self, WebApiAuth, add_chunks, payload, expected_code, expected_message): ... def test_highlight(self, WebApiAuth, add_chunks, payload, expected_code, expected_highlight, expected_message): ... def test_invalid_params(self, WebApiAuth, add_chunks): ... def test_concurrent_retrieval(self, WebApiAuth, add_chunks): ... # Task: Write a Python test method `test_keyword` in test class `TestChunksRetrieval` to verify the behavior of `keyword`. Module under test: concurrent.futures, common, configs
def test_keyword(self, WebApiAuth, add_chunks, payload, expected_code, expected_page_size, expected_message):
    dataset_id, _, _ = add_chunks
    payload.update({"question": "chunk test", "kb_id": [dataset_id]})
    res = retrieval_chunks(WebApiAuth, payload)
    assert res["code"] == expected_code, res
    if expected_code == 0:
        assert len(res["data"]["chunks"]) == expected_page_size, res
    else:
        assert res["message"] == expected_message, res
test
1
{"function_name": "test_keyword", "class_name": "TestChunksRetrieval", "qualname": "TestChunksRetrieval.test_keyword", "file_path": "test/testcases/test_web_api/test_chunk_app/test_retrieval_chunks.py", "repo_id": "infiniflow/ragflow", "loc": 9, "tested_modules": ["concurrent.futures", "common", "configs", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai-tools/tests/tools/tool_collection_test.py:TestToolCollection.test_access_by_index
# Context: class TestToolCollection(unittest.TestCase): def setUp(self): ... def _create_mock_tool(self, name, description): ... def test_initialization(self): ... def test_empty_initialization(self): ... def test_initialization_with_none(self): ... def test_access_by_name(self): ... def test_key_error_for_invalid_name(self): ... def test_index_error_for_invalid_index(self): ... def test_negative_index(self): ... def test_append(self): ... def test_append_duplicate_name(self): ... def test_extend(self): ... def test_insert(self): ... def test_remove(self): ... def test_remove_nonexistent_tool(self): ... def test_pop(self): ... def test_pop_last(self): ... def test_clear(self): ... def test_iteration(self): ... def test_contains(self): ... def test_slicing(self): ... def test_getitem_with_tool_name_as_int(self): ... def test_filter_by_names(self): ... def test_filter_where(self): ... # Task: Write a Python test method `test_access_by_index` in test class `TestToolCollection` to verify the behavior of `access_by_index`. Module under test: crewai.tools, crewai_tools.adapters.tool_collection
def test_access_by_index(self):
    self.assertEqual(self.tools[0], self.search_tool)
    self.assertEqual(self.tools[1], self.calculator_tool)
    self.assertEqual(self.tools[2], self.translator_tool)
test
0
{"function_name": "test_access_by_index", "class_name": "TestToolCollection", "qualname": "TestToolCollection.test_access_by_index", "file_path": "lib/crewai-tools/tests/tools/tool_collection_test.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["crewai.tools", "crewai_tools.adapters.tool_collection"], "has_docstring": false, "runnable_level": "class_runnable"}
browser-use/browser-use:tests/ci/test_structured_extraction.py:TestSchemaDictToPydanticModel.test_rejects_ref
# Context: import pytest from browser_use.tools.extraction.schema_utils import schema_dict_to_pydantic_model class TestExtractionResult: ... def _make_extraction_llm(structured_response: dict | None, freetext_response: str) -> BaseChatModel: ... async def browser_session(): ... def http_server(): ... def base_url(http_server): ... class TestExtractStructured: ... class TestExtractionSchemaInjection: ... class TestSchemaDictToPydanticModel: def test_flat_object(self): ... def test_nested_object(self): ... def test_array_of_objects(self): ... def test_array_of_primitives(self): ... def test_enum_field(self): ... def test_optional_enum_defaults_to_none(self): ... def test_optional_fields_get_type_appropriate_defaults(self): ... def test_optional_non_nullable_rejects_null(self): ... def test_optional_with_explicit_default(self): ... def test_optional_nested_object_defaults_to_none(self): ... def test_model_name_from_title(self): ... def test_model_validate_json_roundtrip(self): ... def test_rejects_allOf(self): ... def test_rejects_non_object_toplevel(self): ... def test_rejects_empty_properties(self): ... def test_extra_fields_forbidden(self): ... def test_nullable_field(self): ... def test_field_descriptions_preserved(self): ... # Task: Write a Python test method `test_rejects_ref` in test class `TestSchemaDictToPydanticModel` to verify the behavior of `rejects_ref`. Module under test: pydantic, browser_use.agent.views, browser_use.browser
def test_rejects_ref(self):
    schema = {
        'type': 'object',
        'properties': {'item': {'$ref': '#/$defs/Item'}},
        '$defs': {'Item': {'type': 'object', 'properties': {'name': {'type': 'string'}}}},
    }
    with pytest.raises(ValueError, match='Unsupported JSON Schema keyword'):
        schema_dict_to_pydantic_model(schema)
test
0
{"function_name": "test_rejects_ref", "class_name": "TestSchemaDictToPydanticModel", "qualname": "TestSchemaDictToPydanticModel.test_rejects_ref", "file_path": "tests/ci/test_structured_extraction.py", "repo_id": "browser-use/browser-use", "loc": 8, "tested_modules": ["pydantic", "browser_use.agent.views", "browser_use.browser", "browser_use.filesystem.file_system", "browser_use.llm.base"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:ci/ray_ci/ray_image.py:RayImage.wanda_image_name
# Context: class RayImageError(Exception): ... class RayImage: def __post_init__(self): ... def arch_suffix(self) -> str: ... def repo(self) -> str: ... def variation_suffix(self) -> str: ... def validate(self) -> None: ... # Task: Write a Python method `wanda_image_name` for the class `RayImage` to wanda output image name (without registry prefix). Returns: str
def wanda_image_name(self) -> str:
    """Wanda output image name (without registry prefix)."""
    if self.platform == "cpu":
        return f"{self.image_type}-py{self.python_version}-cpu{self.arch_suffix}"
    return f"{self.image_type}-py{self.python_version}-{self.platform}{self.arch_suffix}"
function_simple
0
{"cognitive_complexity": 1, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "wanda_image_name", "class_name": "RayImage", "qualname": "RayImage.wanda_image_name", "file_path": "ci/ray_ci/ray_image.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"}
infiniflow/ragflow:common/data_source/confluence_connector.py:ConfluenceConnector._fetch_page_attachments
# Context: import logging from pathlib import Path from typing import Any, cast, Iterator, Callable, Generator from common.data_source.config import INDEX_BATCH_SIZE, DocumentSource, CONTINUE_ON_CONNECTOR_FAILURE, \ CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, CONFLUENCE_TIMEZONE_OFFSET, CONFLUENCE_CONNECTOR_USER_PROFILES_OVERRIDE, \ CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, \ OAUTH_CONFLUENCE_CLOUD_CLIENT_ID, OAUTH_CONFLUENCE_CLOUD_CLIENT_SECRET, _DEFAULT_PAGINATION_LIMIT, \ _PROBLEMATIC_EXPANSIONS, _REPLACEMENT_EXPANSIONS, _USER_NOT_FOUND, _COMMENT_EXPANSION_FIELDS, \ _ATTACHMENT_EXPANSION_FIELDS, _PAGE_EXPANSION_FIELDS, ONE_DAY, ONE_HOUR, _RESTRICTIONS_EXPANSION_FIELDS, \ _SLIM_DOC_BATCH_SIZE, CONFLUENCE_CONNECTOR_ATTACHMENT_SIZE_THRESHOLD from common.data_source.interfaces import ( ConnectorCheckpoint, CredentialsConnector, SecondsSinceUnixEpoch, SlimConnectorWithPermSync, StaticCredentialsProvider, CheckpointedConnector, SlimConnector, CredentialsProviderInterface, ConfluenceUser, IndexingHeartbeatInterface, AttachmentProcessingResult, CheckpointOutput ) from common.data_source.models import ConnectorFailure, Document, TextSection, ImageSection, BasicExpertInfo, \ DocumentFailure, GenerateSlimDocumentOutput, SlimDocument, ExternalAccess from common.data_source.utils import load_all_docs_from_checkpoint_connector, scoped_url, \ process_confluence_user_profiles_override, confluence_refresh_tokens, run_with_timeout, _handle_http_error, \ update_param_in_path, get_start_param_from_url, build_confluence_document_id, datetime_from_string, \ is_atlassian_date_error, validate_attachment_filetype class ConfluenceCheckpoint(ConnectorCheckpoint): ... class ConfluenceRateLimitError(Exception): ... class OnyxConfluence: ... def get_user_email_from_username__server(confluence_client: OnyxConfluence, user_name: str) -> str | None: ... def _get_user(confluence_client: OnyxConfluence, user_id: str) -> str: ... def sanitize_attachment_title(title: str) -> str: ... def extract_text_from_confluence_html(confluence_client: OnyxConfluence, confluence_object: dict[str, Any], fetched_titles: set[str]) -> str: ... def _remove_macro_stylings(soup: bs4.BeautifulSoup) -> None: ... def get_page_restrictions(confluence_client: OnyxConfluence, page_id: str, page_restrictions: dict[str, Any], ancestors: list[dict[str, Any]]) -> ExternalAccess | None: ... def get_all_space_permissions(confluence_client: OnyxConfluence, is_cloud: bool) -> dict[str, ExternalAccess]: ... def _make_attachment_link(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], parent_content_id: str | None) -> str | None: ... def _process_image_attachment(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], raw_bytes: bytes, media_type: str) -> AttachmentProcessingResult: ... def process_attachment(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], parent_content_id: str | None, allow_images: bool) -> AttachmentProcessingResult: ... def convert_attachment_to_content(confluence_client: 'OnyxConfluence', attachment: dict[str, Any], page_id: str, allow_images: bool) -> tuple[str | None, bytes | bytearray | None] | None: ... 
class ConfluenceConnector(CheckpointedConnector[ConfluenceCheckpoint], SlimConnector, SlimConnectorWithPermSync, CredentialsConnector): def __init__( self, wiki_base: str, is_cloud: bool, space: str = "", page_id: str = "", index_recursively: bool = False, cql_query: str | None = None, batch_size: int = INDEX_BATCH_SIZE, continue_on_failure: bool = CONTINUE_ON_CONNECTOR_FAILURE, # if a page has one of the labels specified in this list, we will just # skip it. This is generally used to avoid indexing extra sensitive # pages. labels_to_skip: list[str] = CONFLUENCE_CONNECTOR_LABELS_TO_SKIP, timezone_offset: float = CONFLUENCE_TIMEZONE_OFFSET, time_buffer_seconds: int = CONFLUENCE_SYNC_TIME_BUFFER_SECONDS, scoped_token: bool = False, ) -> None: self.wiki_base = wiki_base self.is_cloud = is_cloud self.space = space self.page_id = page_id self.index_recursively = index_recursively self.cql_query = cql_query self.batch_size = batch_size self.labels_to_skip = labels_to_skip self.timezone_offset = timezone_offset self.time_buffer_seconds = max(0, time_buffer_seconds) self.scoped_token = scoped_token self._confluence_client: OnyxConfluence | None = None self._low_timeout_confluence_client: OnyxConfluence | None = None self._fetched_titles: set[str] = set() self.allow_images = False # Track document names to detect duplicates self._document_name_counts: dict[str, int] = {} self._document_name_paths: dict[str, list[str]] = {} # Remove trailing slash from wiki_base if present self.wiki_base = wiki_base.rstrip("/") """ If nothing is provided, we default to fetching all pages Only one or none of the following options should be specified so the order shouldn't matter However, we use elif to ensure that only of the following is enforced """ base_cql_page_query = "type=page" if cql_query: base_cql_page_query = cql_query elif page_id: if index_recursively: base_cql_page_query += f" and (ancestor='{page_id}' or id='{page_id}')" else: base_cql_page_query += f" and id='{page_id}'" elif space: uri_safe_space = quote(space) base_cql_page_query += f" and space='{uri_safe_space}'" self.base_cql_page_query = base_cql_page_query self.cql_label_filter = "" if labels_to_skip: labels_to_skip = list(set(labels_to_skip)) comma_separated_labels = ",".join( f"'{quote(label)}'" for label in labels_to_skip ) self.cql_label_filter = f" and label not in ({comma_separated_labels})" self.timezone: timezone = timezone(offset=timedelta(hours=timezone_offset)) self.credentials_provider: CredentialsProviderInterface | None = None self.probe_kwargs = { "max_backoff_retries": 6, "max_backoff_seconds": 10, } self.final_kwargs = { "max_backoff_retries": 10, "max_backoff_seconds": 60, } # deprecated self.continue_on_failure = continue_on_failure def set_allow_images(self, value: bool) -> None: ... def _adjust_start_for_query(self, start: SecondsSinceUnixEpoch | None) -> SecondsSinceUnixEpoch | None: ... def _is_newer_than_start(self, doc_time: datetime | None, start: SecondsSinceUnixEpoch | None) -> bool: ... def confluence_client(self) -> OnyxConfluence: ... def low_timeout_confluence_client(self) -> OnyxConfluence: ... def set_credentials_provider(self, credentials_provider: CredentialsProviderInterface) -> None: ... def load_credentials(self, credentials: dict[str, Any]) -> dict[str, Any] | None: ... def _construct_page_cql_query(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> str: ... 
def _construct_attachment_query(self, confluence_page_id: str, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> str: ... def _get_comment_string_for_page_id(self, page_id: str) -> str: ... def _convert_page_to_document(self, page: dict[str, Any]) -> Document | ConnectorFailure: ... def _fetch_document_batches(self, checkpoint: ConfluenceCheckpoint, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None) -> CheckpointOutput[ConfluenceCheckpoint]: ... def _build_page_retrieval_url(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, limit: int) -> str: ... def load_from_checkpoint(self, start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch, checkpoint: ConfluenceCheckpoint) -> CheckpointOutput[ConfluenceCheckpoint]: ... def build_dummy_checkpoint(self) -> ConfluenceCheckpoint: ... def validate_checkpoint_json(self, checkpoint_json: str) -> ConfluenceCheckpoint: ... def retrieve_all_slim_docs(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None) -> GenerateSlimDocumentOutput: ... def retrieve_all_slim_docs_perm_sync(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None) -> GenerateSlimDocumentOutput: ... def _retrieve_all_slim_docs(self, start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None, callback: IndexingHeartbeatInterface | None, include_permissions: bool) -> GenerateSlimDocumentOutput: ... def validate_connector_settings(self) -> None: ... # Task: Write a Python method `_fetch_page_attachments` for the class `ConfluenceConnector` to add inline attachments directly to the document as text or image sections. Parameters: page: dict[str, Any], start: SecondsSinceUnixEpoch | None, end: SecondsSinceUnixEpoch | None Returns: tuple[list[Document], list[ConnectorFailure]]
def _fetch_page_attachments(
    self,
    page: dict[str, Any],
    start: SecondsSinceUnixEpoch | None = None,
    end: SecondsSinceUnixEpoch | None = None,
) -> tuple[list[Document], list[ConnectorFailure]]:
    """
    Inline attachments are added directly to the document as text or image sections by
    this function. The returned Documents/ConnectorFailures are for non-inline attachments
    and those at the end of the page.
    """
    attachment_query = self._construct_attachment_query(page["id"], start, end)

    attachment_failures: list[ConnectorFailure] = []
    attachment_docs: list[Document] = []
    page_url = ""
    for attachment in self.confluence_client.paginated_cql_retrieval(
        cql=attachment_query,
        expand=",".join(_ATTACHMENT_EXPANSION_FIELDS),
    ):
        media_type: str = attachment.get("metadata", {}).get("mediaType", "")

        # TODO(rkuo): this check is partially redundant with validate_attachment_filetype
        # and checks in convert_attachment_to_content/process_attachment
        # but doing the check here avoids an unnecessary download. Due for refactoring.
        if not self.allow_images:
            if media_type.startswith("image/"):
                logging.info(
                    f"Skipping attachment because allow_images is False: {attachment['title']}"
                )
                continue

        if not validate_attachment_filetype(
            attachment,
        ):
            logging.info(
                f"Skipping attachment because it is not an accepted file type: {attachment['title']}"
            )
            continue

        logging.info(
            f"Processing attachment: {attachment['title']} attached to page {page['title']}"
        )

        # Attachment document id: use the download URL for stable identity
        try:
            object_url = build_confluence_document_id(
                self.wiki_base, attachment["_links"]["download"], self.is_cloud
            )
        except Exception as e:
            logging.warning(
                f"Invalid attachment url for id {attachment['id']}, skipping"
            )
            logging.debug(f"Error building attachment url: {e}")
            continue

        try:
            response = convert_attachment_to_content(
                confluence_client=self.confluence_client,
                attachment=attachment,
                page_id=page["id"],
                allow_images=self.allow_images,
            )
            if response is None:
                continue

            file_storage_name, file_blob = response
            if not file_blob:
                logging.info("Skipping attachment because no blob was fetched")
                continue

            # Build attachment-specific metadata
            attachment_metadata: dict[str, str | list[str]] = {}
            if "space" in attachment:
                attachment_metadata["space"] = attachment["space"].get("name", "")
            labels: list[str] = []
            if "metadata" in attachment and "labels" in attachment["metadata"]:
                for label in attachment["metadata"]["labels"].get("results", []):
                    labels.append(label.get("name", ""))
            if labels:
                attachment_metadata["labels"] = labels
            page_url = page_url or build_confluence_document_id(
                self.wiki_base, page["_links"]["webui"], self.is_cloud
            )
            attachment_metadata["parent_page_id"] = page_url

            attachment_id = build_confluence_document_id(
                self.wiki_base, attachment["_links"]["webui"], self.is_cloud
            )

            # Build semantic identifier with space and page context
            attachment_title = attachment.get("title", object_url)
            space_name = page.get("space", {}).get("name", "")
            page_title = page.get("title", "")

            # Create hierarchical name: Space / Page / Attachment
            attachment_path_parts = []
            if space_name:
                attachment_path_parts.append(space_name)
            if page_title:
                attachment_path_parts.append(page_title)
            attachment_path_parts.append(attachment_title)
            full_attachment_path = " / ".join(attachment_path_parts) if len(attachment_path_parts) > 1 else attachment_title

            # Track attachment names for duplicate detection
            if attachment_title not in self._document_name_counts:
                self._document_name_counts[attachment_title] = 0
                self._document_name_paths[attachment_title] = []
            self._document_name_counts[attachment_title] += 1
            self._document_name_paths[attachment_title].append(full_attachment_path)

            # Use simple name if no duplicates, otherwise use full path
            if self._document_name_counts[attachment_title] == 1:
                attachment_semantic_identifier = attachment_title
            else:
                attachment_semantic_identifier = full_attachment_path

            primary_owners: list[BasicExpertInfo] | None = None
            if "version" in attachment and "by" in attachment["version"]:
                author = attachment["version"]["by"]
                display_name = author.get("displayName", "Unknown")
                email = author.get("email", "unknown@domain.invalid")
                primary_owners = [
                    BasicExpertInfo(display_name=display_name, email=email)
                ]

            extension = Path(attachment.get("title", "")).suffix or ".unknown"

            attachment_doc = Document(
                id=attachment_id,
                source=DocumentSource.CONFLUENCE,
                semantic_identifier=attachment_semantic_identifier,
                extension=extension,
                blob=file_blob,
                size_bytes=len(file_blob),
                metadata=attachment_metadata,
                doc_updated_at=(
                    datetime_from_string(attachment["version"]["when"])
                    if attachment.get("version") and attachment["version"].get("when")
                    else None
                ),
                primary_owners=primary_owners,
            )
            if self._is_newer_than_start(attachment_doc.doc_updated_at, start):
                attachment_docs.append(attachment_doc)
        except Exception as e:
            logging.error(
                f"Failed to extract/summarize attachment {attachment['title']}",
                exc_info=e,
            )
            if is_atlassian_date_error(e):
                # propagate error to be caught and retried
                raise
            attachment_failures.append(
                ConnectorFailure(
                    failed_document=DocumentFailure(
                        document_id=object_url,
                        document_link=object_url,
                    ),
                    failure_message=f"Failed to extract/summarize attachment {attachment['title']} for doc {object_url}",
                    exception=e,
                )
            )

    return attachment_docs, attachment_failures
function_complex
1
{"cognitive_complexity": 58, "loc": 167, "code_loc": 128, "docstring_loc": 5, "function_name": "_fetch_page_attachments", "class_name": "ConfluenceConnector", "qualname": "ConfluenceConnector._fetch_page_attachments", "file_path": "common/data_source/confluence_connector.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/_internal/batch/benchmark/benchmark_processor.py:_build_serve_deployment_config
# Context: from ray.data.llm import ( ChatTemplateStageConfig, DetokenizeStageConfig, ServeDeploymentProcessorConfig, TokenizerStageConfig, build_processor, vLLMEngineProcessorConfig, ) from ray.serve.llm.openai_api_models import CompletionRequest class Mode(Enum): ... def build_vllm_engine_kwargs(**kwargs) -> dict: ... def _build_vllm_engine_config(model: str, batch_size: int, concurrency: int, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str, task_type: str, max_model_len: int) -> vLLMEngineProcessorConfig: ... class BenchmarkResult: ... def build_single_vllm_engine_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str): ... def build_shared_vllm_engine_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str): ... def build_classify_processor(batch_size: int, concurrency: int, model: str, pooling_params: dict, max_model_len: int, distributed_executor_backend: str): ... def setup_serve_deployment(model: str, concurrency: int) -> tuple[str, str]: ... def _is_app_ready(app_name: str) -> bool: ... def build_single_serve_deployment_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, deployment_name: str, app_name: str, **kwargs): ... def build_shared_serve_deployment_processor(batch_size: int, concurrency: int, model: str, sampling_params: dict, deployment_name: str, app_name: str, **kwargs): ... def run_processor(mode: Mode, dataset: data.Dataset, builder, **kwargs) -> BenchmarkResult: ... def benchmark(mode: Mode, dataset: data.Dataset, batch_size: int, concurrency: int, model: str, sampling_params: dict, pipeline_parallel_size: int, tensor_parallel_size: int, distributed_executor_backend: str) -> BenchmarkResult: ... def parse_args(argv: list[str]) -> argparse.Namespace: ... def main() -> None: ... # Task: Write a Python function `_build_serve_deployment_config` to create a ServeDeploymentProcessorConfig. Parameters: batch_size: int, concurrency: int, deployment_name: str, app_name: str Returns: ServeDeploymentProcessorConfig
def _build_serve_deployment_config(
    batch_size: int,
    concurrency: int,
    deployment_name: str | None = None,
    app_name: str | None = None,
) -> ServeDeploymentProcessorConfig:
    """Helper to create ServeDeploymentProcessorConfig."""
    return ServeDeploymentProcessorConfig(
        deployment_name=deployment_name,
        app_name=app_name,
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
        batch_size=batch_size,
        concurrency=concurrency,
    )
function_simple
0
{"cognitive_complexity": 0, "loc": 16, "code_loc": 9, "docstring_loc": 1, "function_name": "_build_serve_deployment_config", "class_name": null, "qualname": "_build_serve_deployment_config", "file_path": "python/ray/llm/_internal/batch/benchmark/benchmark_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_type_converter_component.py:TestTypeConverterComponent.test_dataframe_to_data
# Context: import pandas as pd from lfx.schema.data import Data from lfx.schema.dataframe import DataFrame class TestTypeConverterComponent(ComponentTestBaseWithoutClient): def component_class(self): ... def file_names_mapping(self): ... def test_message_to_message(self, component_class): ... def test_message_to_data(self, component_class): ... def test_message_to_dataframe(self, component_class): ... def test_data_to_message(self, component_class): ... def test_data_to_data(self, component_class): ... def test_data_to_dataframe(self, component_class): ... def test_dataframe_to_message(self, component_class): ... def test_dataframe_to_dataframe(self, component_class): ... def test_update_outputs(self, component_class): ... def test_message_with_valid_json_text_to_data(self, component_class): ... def test_message_with_invalid_json_text_to_data(self, component_class): ... def test_message_with_valid_json_array_to_data(self, component_class): ... def test_message_with_valid_csv_to_data(self, component_class): ... def test_message_with_valid_csv_to_dataframe(self, component_class): ... def test_message_with_valid_json_object_to_dataframe(self, component_class): ... def test_message_with_valid_json_array_to_dataframe(self, component_class): ... def test_message_with_compact_json_array_to_dataframe(self, component_class): ... # Task: Write a Python test method `test_dataframe_to_data` in test class `TestTypeConverterComponent` to test converting DataFrame to Data. Module under test: io, lfx.components.processing.converter, lfx.schema.data
def test_dataframe_to_data(self, component_class): """Test converting DataFrame to Data.""" df_data = pd.DataFrame({"col1": ["Hello"]}) component = component_class(input_data=DataFrame(data=df_data), output_type="Data") result = component.convert_to_data() assert isinstance(result, Data) assert isinstance(result.data, dict)
test
1
{"function_name": "test_dataframe_to_data", "class_name": "TestTypeConverterComponent", "qualname": "TestTypeConverterComponent.test_dataframe_to_data", "file_path": "src/backend/tests/unit/components/processing/test_type_converter_component.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["io", "lfx.components.processing.converter", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/ssh/tests/unit/ssh/hooks/test_ssh_async.py:TestSSHHookAsync.test_parse_extras_host_key
# Context: from unittest import mock from airflow.providers.ssh.hooks.ssh import SSHHookAsync class TestSSHHookAsync: def test_init_with_conn_id(self): ... def test_init_with_overrides(self): ... def test_init_default_known_hosts(self): ... def test_parse_extras_key_file(self): ... def test_parse_extras_no_host_key_check(self): ... def test_parse_extras_host_key_with_no_check_raises(self): ... def test_parse_extras_private_key(self): ... async def test_get_conn_builds_config(self): ... async def test_run_command(self): ... async def test_run_command_output(self): ... # Task: Write a Python test method `test_parse_extras_host_key` in test class `TestSSHHookAsync` to test parsing host_key from connection extras. Module under test: __future__, airflow.providers.ssh.hooks.ssh
def test_parse_extras_host_key(self): """Test parsing host_key from connection extras.""" hook = SSHHookAsync(ssh_conn_id="test_conn") mock_conn = mock.MagicMock() mock_conn.extra_dejson = {"host_key": "ssh-rsa AAAAB3...", "no_host_key_check": "false"} mock_conn.host = "test.host" hook._parse_extras(mock_conn) assert hook.known_hosts == b"test.host ssh-rsa AAAAB3..."
test
1
{"function_name": "test_parse_extras_host_key", "class_name": "TestSSHHookAsync", "qualname": "TestSSHHookAsync.test_parse_extras_host_key", "file_path": "providers/ssh/tests/unit/ssh/hooks/test_ssh_async.py", "repo_id": "apache/airflow", "loc": 9, "tested_modules": ["__future__", "airflow.providers.ssh.hooks.ssh"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/browser/session.py:BrowserSession.get_target_id_from_tab_id
# Context: from cdp_use.cdp.target import SessionID, TargetID class Target(BaseModel): ... class CDPSession(BaseModel): ... class BrowserSession(BaseModel): model_config = ConfigDict( def __init__( self, *, # Cloud browser params - use these for cloud mode cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, # Common params that work with cloud id: str | None = None, headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, captcha_solver: bool | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, ) -> None: ... def __init__( self, *, # Core configuration for local id: str | None = None, cdp_url: str | None = None, browser_profile: BrowserProfile | None = None, # Local browser launch params executable_path: str | Path | None = None, headless: bool | None = None, user_data_dir: str | Path | None = None, args: list[str] | None = None, downloads_path: str | Path | None = None, # Common params headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, # All other local params env: dict[str, str | float | bool] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, traces_dir: str | Path | None = None, accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, storage_state: str | Path | dict[str, Any] | None = None, disable_security: bool | None = None, deterministic_rendering: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = 
None, filter_highlight_ids: bool | None = None, profile_directory: str | None = None, ) -> None: ... def __init__( self, # Core configuration id: str | None = None, cdp_url: str | None = None, is_local: bool = False, browser_profile: BrowserProfile | None = None, # Cloud browser params (don't mix with local browser params) cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases for cloud params profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, # BrowserProfile fields that can be passed directly # From BrowserConnectArgs headers: dict[str, str] | None = None, # From BrowserLaunchArgs env: dict[str, str | float | bool] | None = None, executable_path: str | Path | None = None, headless: bool | None = None, args: list[str] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, downloads_path: str | Path | None = None, traces_dir: str | Path | None = None, # From BrowserContextArgs accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, # From BrowserLaunchPersistentContextArgs user_data_dir: str | Path | None = None, # From BrowserNewContextArgs storage_state: str | Path | dict[str, Any] | None = None, # BrowserProfile specific fields ## Cloud Browser Fields use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, ## Other params disable_security: bool | None = None, deterministic_rendering: bool | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, filter_highlight_ids: bool | None = None, auto_download_pdfs: bool | None = None, profile_directory: str | None = None, cookie_whitelist_domains: list[str] | None = None, # DOM extraction layer configuration cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, # Iframe processing limits max_iframes: int | None = None, max_iframe_depth: int | None = None, ): # Following the same pattern as AgentSettings in service.py # Only pass non-None values to avoid validation errors profile_kwargs = { k: v for k, v in locals().items() if k not in [ 'self', 'browser_profile', 'id', 'cloud_profile_id', 'cloud_proxy_country_code', 'cloud_timeout', 'profile_id', 'proxy_country_code', 'timeout', ] and v is not None } # Handle backward compatibility: prefer cloud_* params over old names final_profile_id = cloud_profile_id if 
cloud_profile_id is not None else profile_id final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code final_timeout = cloud_timeout if cloud_timeout is not None else timeout # If any cloud params are provided, create cloud_browser_params if final_profile_id is not None or final_proxy_country_code is not None or final_timeout is not None: cloud_params = CreateBrowserRequest( cloud_profile_id=final_profile_id, cloud_proxy_country_code=final_proxy_country_code, cloud_timeout=final_timeout, ) profile_kwargs['cloud_browser_params'] = cloud_params profile_kwargs['use_cloud'] = True # Handle backward compatibility: map cloud_browser to use_cloud if 'cloud_browser' in profile_kwargs: profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser') # If cloud_browser_params is set, force use_cloud=True if cloud_browser_params is not None: profile_kwargs['use_cloud'] = True # if is_local is False but executable_path is provided, set is_local to True if is_local is False and executable_path is not None: profile_kwargs['is_local'] = True # Only set is_local=True when cdp_url is missing if we're not using cloud browser # (cloud browser will provide cdp_url later) use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser') if not cdp_url and not use_cloud: profile_kwargs['is_local'] = True # Create browser profile from direct parameters or use provided one if browser_profile is not None: # Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence) merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs} resolved_browser_profile = BrowserProfile(**merged_kwargs) else: resolved_browser_profile = BrowserProfile(**profile_kwargs) # Initialize the Pydantic model super().__init__( id=id or str(uuid7str()), browser_profile=resolved_browser_profile, ) def from_system_chrome(cls, profile_directory: str | None, **kwargs) -> Self: ... def list_chrome_profiles(cls) -> list[dict[str, str]]: ... def cdp_url(self) -> str | None: ... def is_local(self) -> bool: ... def is_cdp_connected(self) -> bool: ... async def wait_if_captcha_solving(self, timeout: float | None) -> 'CaptchaWaitResult | None': ... def is_reconnecting(self) -> bool: ... def cloud_browser(self) -> bool: ... def demo_mode(self) -> 'DemoMode | None': ... def logger(self) -> Any: ... def _id_for_logs(self) -> str: ... def _tab_id_for_logs(self) -> str: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... async def reset(self) -> None: ... def model_post_init(self, __context) -> None: ... async def start(self) -> None: ... async def kill(self) -> None: ... async def stop(self) -> None: ... async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]: ... async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None: ... async def _navigate_and_wait(self, url: str, target_id: str, timeout: float | None, wait_until: str) -> None: ... async def on_SwitchTabEvent(self, event: SwitchTabEvent) -> TargetID: ... async def on_CloseTabEvent(self, event: CloseTabEvent) -> None: ... async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ... async def on_TabClosedEvent(self, event: TabClosedEvent) -> None: ... async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None: ... async def on_FileDownloadedEvent(self, event: FileDownloadedEvent) -> None: ... async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None: ... 
def cdp_client(self) -> CDPClient: ... async def new_page(self, url: str | None) -> 'Page': ... async def get_current_page(self) -> 'Page | None': ... async def must_get_current_page(self) -> 'Page': ... async def get_pages(self) -> list['Page']: ... def get_focused_target(self) -> 'Target | None': ... def get_page_targets(self) -> list['Target']: ... async def close_page(self, page: 'Union[Page, str]') -> None: ... async def cookies(self) -> list['Cookie']: ... async def clear_cookies(self) -> None: ... async def export_storage_state(self, output_path: str | Path | None) -> dict[str, Any]: ... async def get_or_create_cdp_session(self, target_id: TargetID | None, focus: bool) -> CDPSession: ... async def set_extra_headers(self, headers: dict[str, str], target_id: TargetID | None) -> None: ... async def get_browser_state_summary(self, include_screenshot: bool, cached: bool, include_recent_events: bool) -> BrowserStateSummary: ... async def get_state_as_text(self) -> str: ... async def attach_all_watchdogs(self) -> None: ... async def connect(self, cdp_url: str | None) -> Self: ... async def _setup_proxy_auth(self) -> None: ... async def reconnect(self) -> None: ... async def _auto_reconnect(self, max_attempts: int) -> None: ... def _attach_ws_drop_callback(self) -> None: ... async def get_tabs(self) -> list[TabInfo]: ... async def get_current_target_info(self) -> TargetInfo | None: ... async def get_current_page_url(self) -> str: ... async def get_current_page_title(self) -> str: ... async def navigate_to(self, url: str, new_tab: bool) -> None: ... async def get_dom_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... def update_cached_selector_map(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> None: ... async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... async def get_dom_element_at_coordinates(self, x: int, y: int) -> EnhancedDOMTreeNode | None: ... async def get_target_id_from_url(self, url: str) -> TargetID: ... async def get_most_recently_opened_target_id(self) -> TargetID: ... def is_file_input(self, element: Any) -> bool: ... async def get_selector_map(self) -> dict[int, EnhancedDOMTreeNode]: ... async def get_index_by_id(self, element_id: str) -> int | None: ... async def get_index_by_class(self, class_name: str) -> int | None: ... async def remove_highlights(self) -> None: ... async def get_element_coordinates(self, backend_node_id: int, cdp_session: CDPSession) -> DOMRect | None: ... async def highlight_interaction_element(self, node: 'EnhancedDOMTreeNode') -> None: ... async def highlight_coordinate_click(self, x: int, y: int) -> None: ... async def add_highlights(self, selector_map: dict[int, 'EnhancedDOMTreeNode']) -> None: ... async def _close_extension_options_pages(self) -> None: ... async def send_demo_mode_log(self, message: str, level: str, metadata: dict[str, Any] | None) -> None: ... def downloaded_files(self) -> list[str]: ... async def _cdp_get_all_pages(self, include_http: bool, include_about: bool, include_pages: bool, include_iframes: bool, include_workers: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool) -> list[TargetInfo]: ... async def _cdp_create_new_page(self, url: str, background: bool, new_window: bool) -> str: ... async def _cdp_close_page(self, target_id: TargetID) -> None: ... async def _cdp_get_cookies(self) -> list[Cookie]: ... async def _cdp_set_cookies(self, cookies: list[Cookie]) -> None: ... async def _cdp_clear_cookies(self) -> None: ... 
async def _cdp_grant_permissions(self, permissions: list[str], origin: str | None) -> None: ... async def _cdp_set_geolocation(self, latitude: float, longitude: float, accuracy: float) -> None: ... async def _cdp_clear_geolocation(self) -> None: ... async def _cdp_add_init_script(self, script: str) -> str: ... async def _cdp_remove_init_script(self, identifier: str) -> None: ... async def _cdp_set_viewport(self, width: int, height: int, device_scale_factor: float, mobile: bool, target_id: str | None) -> None: ... async def _cdp_get_origins(self) -> list[dict[str, Any]]: ... async def _cdp_get_storage_state(self) -> dict: ... async def _cdp_navigate(self, url: str, target_id: TargetID | None) -> None: ... def _is_valid_target(target_info: TargetInfo, include_http: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool, include_about: bool, include_iframes: bool, include_pages: bool, include_workers: bool) -> bool: ... async def get_all_frames(self) -> tuple[dict[str, dict], dict[str, str]]: ... async def _populate_frame_metadata(self, all_frames: dict[str, dict], target_sessions: dict[str, str]) -> None: ... async def find_frame_target(self, frame_id: str, all_frames: dict[str, dict] | None) -> dict | None: ... async def cdp_client_for_target(self, target_id: TargetID) -> CDPSession: ... async def cdp_client_for_frame(self, frame_id: str) -> CDPSession: ... async def cdp_client_for_node(self, node: EnhancedDOMTreeNode) -> CDPSession: ... async def take_screenshot(self, path: str | None, full_page: bool, format: str, quality: int | None, clip: dict | None) -> bytes: ... async def screenshot_element(self, selector: str, path: str | None, format: str, quality: int | None) -> bytes: ... async def _get_element_bounds(self, selector: str) -> dict | None: ... # Task: Write a Python async method `get_target_id_from_tab_id` for the class `BrowserSession` to get the full-length TargetID from the truncated 4-char tab_id using SessionManager. Parameters: tab_id: str Returns: TargetID
async def get_target_id_from_tab_id(self, tab_id: str) -> TargetID: """Get the full-length TargetID from the truncated 4-char tab_id using SessionManager.""" if not self.session_manager: raise RuntimeError('SessionManager not initialized') for full_target_id in self.session_manager.get_all_target_ids(): if full_target_id.endswith(tab_id): if await self.session_manager.is_target_valid(full_target_id): return full_target_id # Stale target - Chrome should have sent detach event # If we're here, event listener will clean it up self.logger.debug(f'Found stale target {full_target_id}, skipping') raise ValueError(f'No TargetID found ending in tab_id=...{tab_id}')
function_complex
0
{"cognitive_complexity": 7, "loc": 14, "code_loc": 8, "docstring_loc": 1, "function_name": "get_target_id_from_tab_id", "class_name": "BrowserSession", "qualname": "BrowserSession.get_target_id_from_tab_id", "file_path": "browser_use/browser/session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/a2a/utils/test_task.py:TestExecute.test_emits_completed_event
# Context: from unittest.mock import AsyncMock, MagicMock, patch import pytest from crewai.a2a.utils.task import cancel, cancellable, execute def mock_agent() -> MagicMock: ... def mock_task(mock_context: MagicMock) -> MagicMock: ... def mock_context() -> MagicMock: ... def mock_event_queue() -> AsyncMock: ... async def clear_cache(mock_context: MagicMock) -> None: ... class TestCancellableDecorator: ... class TestCancel: ... class TestExecuteAndCancelIntegration: ... class TestExecute: async def test_successful_execution(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_started_event(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_failed_event_on_exception(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... async def test_emits_canceled_event_on_cancellation(self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock) -> None: ... # Task: Write a Python test method `test_emits_completed_event` in test class `TestExecute` to verify that execute emits A2AServerTaskCompletedEvent on success. Module under test: __future__, typing, a2a.server.agent_execution
async def test_emits_completed_event( self, mock_agent: MagicMock, mock_context: MagicMock, mock_event_queue: AsyncMock, mock_task: MagicMock, ) -> None: """Execute emits A2AServerTaskCompletedEvent on success.""" with ( patch("crewai.a2a.utils.task.Task", return_value=mock_task), patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus, ): await execute(mock_agent, mock_context, mock_event_queue) second_call = mock_bus.emit.call_args_list[1] event = second_call[0][1] assert event.type == "a2a_server_task_completed" assert event.task_id == mock_context.task_id assert event.result == "Task completed successfully"
test
0
{"function_name": "test_emits_completed_event", "class_name": "TestExecute", "qualname": "TestExecute.test_emits_completed_event", "file_path": "lib/crewai/tests/a2a/utils/test_task.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["__future__", "typing", "a2a.server.agent_execution", "a2a.server.events", "a2a.types"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/test_default_cluster_autoscaler_v2.py:TestClusterAutoscaling.test_get_node_resource_spec_and_count_from_zero
# Context: from unittest.mock import MagicMock, patch from ray.core.generated import autoscaler_pb2 from ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2 import ( DefaultClusterAutoscalerV2, _get_node_resource_spec_and_count, _NodeResourceSpec, ) class StubUtilizationGauge(ResourceUtilizationGauge): ... class TestClusterAutoscaling: def setup_class(self): ... def teardown_class(self): ... def test_get_node_resource_spec_and_count(self): ... def test_try_scale_up_cluster(self, cpu_util, gpu_util, mem_util): ... def test_try_scale_up_cluster_from_zero(self): ... def test_low_utilization_sends_current_allocation(self): ... def test_get_node_resource_spec_and_count_skips_max_count_zero(self): ... def test_get_node_resource_spec_and_count_missing_all_resources(self): ... def test_try_scale_up_respects_resource_limits(self, resource_limits, node_spec, existing_nodes, scale_up_increment, expected_nodes): ... def test_try_scale_up_respects_resource_limits_heterogeneous_nodes(self): ... def test_try_scale_up_existing_nodes_prioritized_over_delta(self): ... def test_try_scale_up_logs_info_message(self, propagate_logs, caplog): ... # Task: Write a Python test method `test_get_node_resource_spec_and_count_from_zero` in test class `TestClusterAutoscaling` to test that get_node_resource_spec_and_count can discover node types. Module under test: ray.core.generated, ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2, ray.data._internal.cluster_autoscaler.fake_autoscaling_coordinator
def test_get_node_resource_spec_and_count_from_zero(self): """Test that get_node_resource_spec_and_count can discover node types from cluster config even when there are zero worker nodes.""" # Simulate a cluster with only head node (no worker nodes) node_table = [ { "Resources": self._head_node, "Alive": True, }, ] # Create a mock cluster config with 2 worker node types cluster_config = autoscaler_pb2.ClusterConfig() # Node type 1: 4 CPU, 0 GPU, 1000 memory node_group_config1 = autoscaler_pb2.NodeGroupConfig() node_group_config1.resources["CPU"] = 4 node_group_config1.resources["memory"] = 1000 node_group_config1.max_count = 10 cluster_config.node_group_configs.append(node_group_config1) # Node type 2: 8 CPU, 2 GPU, 2000 memory node_group_config2 = autoscaler_pb2.NodeGroupConfig() node_group_config2.resources["CPU"] = 8 node_group_config2.resources["GPU"] = 2 node_group_config2.resources["memory"] = 2000 node_group_config2.max_count = 5 cluster_config.node_group_configs.append(node_group_config2) expected = { _NodeResourceSpec.of(cpu=4, gpu=0, mem=1000): 0, _NodeResourceSpec.of(cpu=8, gpu=2, mem=2000): 0, } with patch("ray.nodes", return_value=node_table): with patch( "ray._private.state.state.get_cluster_config", return_value=cluster_config, ): result = _get_node_resource_spec_and_count() assert result == expected
test
0
{"function_name": "test_get_node_resource_spec_and_count_from_zero", "class_name": "TestClusterAutoscaling", "qualname": "TestClusterAutoscaling.test_get_node_resource_spec_and_count_from_zero", "file_path": "python/ray/data/tests/test_default_cluster_autoscaler_v2.py", "repo_id": "ray-project/ray", "loc": 41, "tested_modules": ["ray.core.generated", "ray.data._internal.cluster_autoscaler.default_cluster_autoscaler_v2", "ray.data._internal.cluster_autoscaler.fake_autoscaling_coordinator", "ray.data._internal.cluster_autoscaler.resource_utilization_gauge", "ray.data._internal.execution.interfaces.execution_options"], "has_docstring": true, "runnable_level": "class_runnable"}
ccxt/ccxt:python/ccxt/static_dependencies/bip/utils/crypto/blake2.py:Blake2b.QuickDigest
# Context: import hashlib from typing import Union from ..misc import AlgoUtils class _Blake2bWithSpecificSize(ABC): ... class Blake2b32(_Blake2bWithSpecificSize): ... class Blake2b40(_Blake2bWithSpecificSize): ... class Blake2b160(_Blake2bWithSpecificSize): ... class Blake2b224(_Blake2bWithSpecificSize): ... class Blake2b256(_Blake2bWithSpecificSize): ... class Blake2b512(_Blake2bWithSpecificSize): ... class Blake2b: # Task: Write a Python method `QuickDigest` for the class `Blake2b` to compute the digest (quick version). Parameters: data: Union[bytes, str], digest_size: int, key: Union[bytes, str], salt: Union[bytes, str] Returns: bytes
def QuickDigest(data: Union[bytes, str], digest_size: int, key: Union[bytes, str] = b"", salt: Union[bytes, str] = b"") -> bytes:
        """
        Compute the digest (quick version).

        Args:
            data (str or bytes)           : Data
            digest_size (int)             : Digest size
            key (str or bytes, optional)  : Key (default: empty)
            salt (str or bytes, optional) : Salt (default: empty)

        Returns:
            bytes: Computed digest
        """
        return hashlib.blake2b(AlgoUtils.Encode(data), digest_size=digest_size, key=AlgoUtils.Encode(key), salt=AlgoUtils.Encode(salt)).digest()
function_simple
1
{"cognitive_complexity": 0, "loc": 20, "code_loc": 4, "docstring_loc": 12, "function_name": "QuickDigest", "class_name": "Blake2b", "qualname": "Blake2b.QuickDigest", "file_path": "python/ccxt/static_dependencies/bip/utils/crypto/blake2.py", "repo_id": "ccxt/ccxt", "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py:TestCollectCodeExecutionToolIds.test_no_code_execution_calls
# Context: from langchain_anthropic.chat_models import ( ChatAnthropic, _collect_code_execution_tool_ids, _is_code_execution_related_block, ) class FakeToolCallingModel(BaseChatModel): ... def test_anthropic_prompt_caching_middleware_initialization() -> None: ... def test_anthropic_prompt_caching_middleware_unsupported_model() -> None: ... async def test_anthropic_prompt_caching_middleware_async() -> None: ... async def test_anthropic_prompt_caching_middleware_async_unsupported_model() -> None: ... async def test_anthropic_prompt_caching_middleware_async_min_messages() -> None: ... async def test_anthropic_prompt_caching_middleware_async_with_system_prompt() -> None: ... async def test_anthropic_prompt_caching_middleware_async_default_values() -> None: ... class TestIsCodeExecutionRelatedBlock: ... class TestCollectCodeExecutionToolIds: def test_empty_messages(self) -> None: ... def test_single_code_execution_call(self) -> None: ... def test_multiple_code_execution_calls(self) -> None: ... def test_future_code_execution_version(self) -> None: ... def test_ignores_user_messages(self) -> None: ... def test_handles_string_content(self) -> None: ... # Task: Write a Python test method `test_no_code_execution_calls` in test class `TestCollectCodeExecutionToolIds` to test messages without any code_execution calls. Module under test: typing, langchain.agents.middleware.types, langchain_core.callbacks
def test_no_code_execution_calls(self) -> None: """Test messages without any code_execution calls.""" messages = [ { "role": "user", "content": [{"type": "text", "text": "Hello"}], }, { "role": "assistant", "content": [ { "type": "tool_use", "id": "toolu_regular", "name": "get_weather", "input": {"location": "NYC"}, } ], }, ] result = _collect_code_execution_tool_ids(messages) assert result == set()
test
1
{"function_name": "test_no_code_execution_calls", "class_name": "TestCollectCodeExecutionToolIds", "qualname": "TestCollectCodeExecutionToolIds.test_no_code_execution_calls", "file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py", "repo_id": "langchain-ai/langchain", "loc": 21, "tested_modules": ["typing", "langchain.agents.middleware.types", "langchain_core.callbacks", "langchain_core.language_models", "langchain_core.messages"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py:TestFootsies.test_default_supress_output_mode
# Context: import os import time from pathlib import Path def _create_env(config_overrides): ... def capture_stdout_stderr(): ... class TestFootsies(unittest.TestCase): def test_enable_output_mode(self): ... # Task: Write a Python test method `test_default_supress_output_mode` in test class `TestFootsies` to verify the behavior of `default_supress_output_mode`. Module under test: contextlib, pathlib, ray.rllib.env
def test_default_supress_output_mode(self): with capture_stdout_stderr() as log_path: env = _create_env({}) time.sleep(2) # Give Unity time to write output env.close() # Give a bit more time for any buffered output to be written time.sleep(0.5) # Read the captured output with open(log_path, "r") as f: captured_output = f.read() assert ( "`log_unity_output` not set in environment config, not logging output by default" in captured_output ) assert "[UnityMemory]" not in captured_output # Clean up if Path(log_path).exists(): os.unlink(log_path)
test
0
{"function_name": "test_default_supress_output_mode", "class_name": "TestFootsies", "qualname": "TestFootsies.test_default_supress_output_mode", "file_path": "rllib/examples/envs/classes/multi_agent/footsies/test/footsies_suppress_unity_logs.py", "repo_id": "ray-project/ray", "loc": 21, "tested_modules": ["contextlib", "pathlib", "ray.rllib.env", "ray.rllib.examples.envs.classes.multi_agent.footsies.footsies_env"], "has_docstring": false, "runnable_level": "file_runnable"}
huggingface/diffusers:src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py:HunyuanVideo15ImageToVideoPipeline.prepare_cond_latents_and_mask
# Context: import PIL import torch def format_text_input(prompt: list[str], system_message: str) -> list[dict[str, Any]]: ... def extract_glyph_texts(prompt: str) -> list[str]: ... def retrieve_latents(encoder_output: torch.Tensor, generator: torch.Generator | None, sample_mode: str): ... def retrieve_timesteps(scheduler, num_inference_steps: int | None, device: str | torch.device | None, timesteps: list[int] | None, sigmas: list[float] | None, **kwargs): ... class HunyuanVideo15ImageToVideoPipeline(DiffusionPipeline): model_cpu_offload_seq = "image_encoder->text_encoder->transformer->vae" def __init__( self, text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, transformer: HunyuanVideo15Transformer3DModel, vae: AutoencoderKLHunyuanVideo15, scheduler: FlowMatchEulerDiscreteScheduler, text_encoder_2: T5EncoderModel, tokenizer_2: ByT5Tokenizer, guider: ClassifierFreeGuidance, image_encoder: SiglipVisionModel, feature_extractor: SiglipImageProcessor, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, guider=guider, image_encoder=image_encoder, feature_extractor=feature_extractor, ) self.vae_scale_factor_temporal = self.vae.temporal_compression_ratio if getattr(self, "vae", None) else 4 self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio if getattr(self, "vae", None) else 16 self.video_processor = HunyuanVideo15ImageProcessor( vae_scale_factor=self.vae_scale_factor_spatial, do_resize=False, do_convert_rgb=True ) self.target_size = self.transformer.config.target_size if getattr(self, "transformer", None) else 640 self.vision_states_dim = ( self.transformer.config.image_embed_dim if getattr(self, "transformer", None) else 1152 ) self.num_channels_latents = self.vae.config.latent_channels if hasattr(self, "vae") else 32 # fmt: off self.system_message = "You are a helpful assistant. Describe the video by detailing the following aspects: \ 1. The main content and theme of the video. \ 2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects. \ 3. Actions, events, behaviors temporal relationships, physical movement changes of the objects. \ 4. background environment, light, style and atmosphere. \ 5. camera angles, movements, and transitions used in the video." # fmt: on self.prompt_template_encode_start_idx = 108 self.tokenizer_max_length = 1000 self.tokenizer_2_max_length = 256 self.vision_num_semantic_tokens = 729 def _get_mllm_prompt_embeds(text_encoder: Qwen2_5_VLTextModel, tokenizer: Qwen2Tokenizer, prompt: str | list[str], device: torch.device, tokenizer_max_length: int, num_hidden_layers_to_skip: int, system_message: str, crop_start: int) -> tuple[torch.Tensor, torch.Tensor]: ... def _get_byt5_prompt_embeds(tokenizer: ByT5Tokenizer, text_encoder: T5EncoderModel, prompt: str | list[str], device: torch.device, tokenizer_max_length: int): ... def _get_image_latents(vae: AutoencoderKLHunyuanVideo15, image_processor: HunyuanVideo15ImageProcessor, image: PIL.Image.Image, height: int, width: int, device: torch.device) -> torch.Tensor: ... def _get_image_embeds(image_encoder: SiglipVisionModel, feature_extractor: SiglipImageProcessor, image: PIL.Image.Image, device: torch.device) -> torch.Tensor: ... def encode_image(self, image: PIL.Image.Image, batch_size: int, device: torch.device, dtype: torch.dtype) -> torch.Tensor: ... 
def encode_prompt(self, prompt: str | list[str], device: torch.device | None, dtype: torch.dtype | None, batch_size: int, num_videos_per_prompt: int, prompt_embeds: torch.Tensor | None, prompt_embeds_mask: torch.Tensor | None, prompt_embeds_2: torch.Tensor | None, prompt_embeds_mask_2: torch.Tensor | None): ... def check_inputs(self, prompt, image: PIL.Image.Image, negative_prompt, prompt_embeds, negative_prompt_embeds, prompt_embeds_mask, negative_prompt_embeds_mask, prompt_embeds_2, prompt_embeds_mask_2, negative_prompt_embeds_2, negative_prompt_embeds_mask_2): ... def prepare_latents(self, batch_size: int, num_channels_latents: int, height: int, width: int, num_frames: int, dtype: torch.dtype | None, device: torch.device | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None) -> torch.Tensor: ... def num_timesteps(self): ... def attention_kwargs(self): ... def current_timestep(self): ... def interrupt(self): ... def __call__(self, image: PIL.Image.Image, prompt: str | list[str], negative_prompt: str | list[str], num_frames: int, num_inference_steps: int, sigmas: list[float], num_videos_per_prompt: int | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None, prompt_embeds: torch.Tensor | None, prompt_embeds_mask: torch.Tensor | None, negative_prompt_embeds: torch.Tensor | None, negative_prompt_embeds_mask: torch.Tensor | None, prompt_embeds_2: torch.Tensor | None, prompt_embeds_mask_2: torch.Tensor | None, negative_prompt_embeds_2: torch.Tensor | None, negative_prompt_embeds_mask_2: torch.Tensor | None, output_type: str | None, return_dict: bool, attention_kwargs: dict[str, Any] | None): ... # Task: Write a Python method `prepare_cond_latents_and_mask` for the class `HunyuanVideo15ImageToVideoPipeline` to prepare conditional latents and mask for image-to-video generation. Parameters: latents: torch.Tensor, image: PIL.Image.Image, batch_size: int, height: int, width: int, dtype: torch.dtype, device: torch.device
def prepare_cond_latents_and_mask(
        self,
        latents: torch.Tensor,
        image: PIL.Image.Image,
        batch_size: int,
        height: int,
        width: int,
        dtype: torch.dtype,
        device: torch.device,
    ):
        """
        Prepare conditional latents and mask for image-to-video generation.

        Args:
            latents: Main latents tensor (B, C, F, H, W)
            image: Conditioning image, encoded into the first latent frame
            batch_size: Number of videos per batch
            height: Target pixel height used to encode the image
            width: Target pixel width used to encode the image

        Returns:
            tuple: (latent_condition, latent_mask) - the encoded image occupies
            the first latent frame and the mask flags that frame with 1.0
        """
        # Unpack into latent-space names so the pixel-space `height`/`width`
        # parameters are not shadowed before being passed to the VAE encode.
        batch, channels, frames, latent_height, latent_width = latents.shape

        image_latents = self._get_image_latents(
            vae=self.vae,
            image_processor=self.video_processor,
            image=image,
            height=height,
            width=width,
            device=device,
        )

        latent_condition = image_latents.repeat(batch_size, 1, frames, 1, 1)
        latent_condition[:, :, 1:, :, :] = 0
        latent_condition = latent_condition.to(device=device, dtype=dtype)

        latent_mask = torch.zeros(batch, 1, frames, latent_height, latent_width, dtype=dtype, device=device)
        latent_mask[:, :, 0, :, :] = 1.0

        return latent_condition, latent_mask
function_simple
1
{"cognitive_complexity": 0, "loc": 39, "code_loc": 15, "docstring_loc": 9, "function_name": "prepare_cond_latents_and_mask", "class_name": "HunyuanVideo15ImageToVideoPipeline", "qualname": "HunyuanVideo15ImageToVideoPipeline.prepare_cond_latents_and_mask", "file_path": "src/diffusers/pipelines/hunyuan_video1_5/pipeline_hunyuan_video1_5_image2video.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/nvfp4_utils.py:prepare_weights_for_nvfp4_flashinfer_trtllm
# Context: import torch from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a class NvFp4LinearBackend(Enum): ... def select_nvfp4_linear_backend() -> NvFp4LinearBackend: ... def prepare_weights_for_nvfp4_cutlass(weight: torch.Tensor, weight_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, int]: ... def prepare_weights_for_nvfp4_fbgemm(weight: torch.Tensor, weight_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: ... def convert_to_nvfp4_linear_kernel_format(backend: NvFp4LinearBackend, layer: torch.nn.Module) -> None: ... def apply_nvfp4_linear(backend: NvFp4LinearBackend, layer: torch.nn.Module, x: torch.Tensor, bias: torch.Tensor | None) -> torch.Tensor: ... def swizzle_blockscale(scale: torch.Tensor) -> torch.Tensor: ... def cutlass_fp4_supported() -> bool: ... def pad_nvfp4_weight_for_cutlass(weight: torch.Tensor, alignment: int) -> tuple[torch.Tensor, int]: ... def pad_nvfp4_activation_for_cutlass(x_fp4: torch.Tensor, weights_padding_bytes: int) -> torch.Tensor: ... def slice_nvfp4_output(out: torch.Tensor, output_size: int) -> torch.Tensor: ... # Task: Write a Python function `prepare_weights_for_nvfp4_flashinfer_trtllm` to prepare weights and scales for FlashInfer TRTLLM FP4 GEMM. Parameters: weight: torch.Tensor, weight_scale: torch.Tensor Returns: tuple[torch.Tensor, torch.Tensor]
def prepare_weights_for_nvfp4_flashinfer_trtllm( weight: torch.Tensor, weight_scale: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: """Prepare weights and scales for FlashInfer TRTLLM FP4 GEMM.""" from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a epilogue_tile_m = 128 shuffled_weight = shuffle_matrix_a(weight.view(torch.uint8), epilogue_tile_m) shuffled_weight_scale = ( shuffle_matrix_sf_a(weight_scale.view(torch.uint8), epilogue_tile_m) .reshape(weight_scale.shape) .view(torch.float8_e4m3fn) ) return shuffled_weight, shuffled_weight_scale
function_simple
1
{"cognitive_complexity": 0, "loc": 16, "code_loc": 9, "docstring_loc": 1, "function_name": "prepare_weights_for_nvfp4_flashinfer_trtllm", "class_name": null, "qualname": "prepare_weights_for_nvfp4_flashinfer_trtllm", "file_path": "vllm/model_executor/layers/quantization/utils/nvfp4_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/_internal/execution/operators/hash_shuffle.py:ShuffleAggregation.is_compacting
Write a Python method `is_compacting` for the class `ShuffleAggregation` to return whether this aggregation is capable of compacting a partial partition's shards list.
def is_compacting(cls):
    """Returns whether this aggregation is capable of compacting partial
    partition's shards list.
    """
    return False
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 1, "docstring_loc": 3, "function_name": "is_compacting", "class_name": "ShuffleAggregation", "qualname": "ShuffleAggregation.is_compacting", "file_path": "python/ray/data/_internal/execution/operators/hash_shuffle.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"}
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py:OffloadingConnectorScheduler.request_finished
# Context: from typing import Any from vllm.v1.request import Request class OffloadingOperationMetrics: ... class OffloadingConnectorStats(KVConnectorStats): ... class OffloadingConnectorMetadata(KVConnectorMetadata): ... class OffloadingConnector(KVConnectorBase_V1): ... class OffloadingConnectorWorker: ... class OffloadPromMetrics(KVConnectorPromMetrics): ... class OffloadingConnectorScheduler: def __init__(self, spec: OffloadingSpec): self.gpu_block_size = spec.gpu_block_size self.offloaded_block_size = spec.offloaded_block_size self.block_size_factor = self.offloaded_block_size // self.gpu_block_size self.manager: OffloadingManager = spec.get_manager() self._requests: dict[ReqId, Request] = {} # list of GPU block IDs per request self._request_block_ids: dict[ReqId, list[int]] = {} # requests to load for the current scheduler step self._reqs_to_load: dict[ReqId, TransferSpec] = {} # request blocks are stored in order # index of next block (of size offloaded_block_size) to offload self._next_stored_block_idx: dict[ReqId, int] = {} # if GPU prefix caching is enabled, # track loaded blocks to avoid redundant loads self._blocks_being_loaded: set[BlockHash] | None = ( set() if spec.vllm_config.cache_config.enable_prefix_caching else None ) # request ID -> set(block hashes being stored/load) self._reqs_being_stored = defaultdict[ReqId, set[BlockHash]](set) self._reqs_being_loaded = defaultdict[ReqId, set[BlockHash]](set) def _get_block_hashes(self, req: Request, start_idx: int, end_idx: int | None) -> Iterable[BlockHash]: ... def get_num_new_matched_tokens(self, request: Request, num_computed_tokens: int) -> tuple[int | None, bool]: ... def update_state_after_alloc(self, request: Request, blocks: KVCacheBlocks, num_external_tokens: int): ... def _get_reqs_to_store(self, scheduler_output: SchedulerOutput): ... def build_connector_meta(self, scheduler_output: SchedulerOutput) -> KVConnectorMetadata: ... def update_connector_output(self, connector_output: KVConnectorOutput): ... def take_events(self) -> Iterable[KVCacheEvent]: ... # Task: Write a Python method `request_finished` for the class `OffloadingConnectorScheduler` to called when a request has finished, before its blocks are freed. Parameters: request: Request, block_ids: list[int] Returns: tuple[bool, dict[str, Any] | None]
def request_finished(
    self,
    request: Request,
    block_ids: list[int],
) -> tuple[bool, dict[str, Any] | None]:
    """
    Called when a request has finished, before its blocks are freed.

    Returns:
        True if the request is being saved/sent asynchronously and blocks
        should not be freed until the request_id is returned from
        get_finished().
        Optional KVTransferParams to be included in the request outputs
        returned by the engine.
    """
    req_id = request.request_id
    self._requests.pop(req_id, None)
    self._request_block_ids.pop(req_id, None)
    self._next_stored_block_idx.pop(req_id, None)
    request_being_stored = req_id in self._reqs_being_stored
    return request_being_stored, None
function_simple
1
{"cognitive_complexity": 0, "loc": 22, "code_loc": 6, "docstring_loc": 10, "function_name": "request_finished", "class_name": "OffloadingConnectorScheduler", "qualname": "OffloadingConnectorScheduler.request_finished", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/redis/tests/unit/redis/triggers/test_redis_await_message.py:TestAwaitMessageTrigger.test_trigger_serialization
# Context: from airflow.providers.redis.triggers.redis_await_message import AwaitMessageTrigger class TestAwaitMessageTrigger: async def test_trigger_run_succeed(self, mock_redis_conn): ... async def test_trigger_run_succeed_with_bytes(self, mock_redis_conn): ... async def test_trigger_run_fail(self, mock_redis_conn): ... # Task: Write a Python test method `test_trigger_serialization` in test class `TestAwaitMessageTrigger` to verify the behavior of `trigger_serialization`. Module under test: __future__, airflow.providers.redis.triggers.redis_await_message
def test_trigger_serialization(self):
    trigger = AwaitMessageTrigger(
        channels=["test_channel"],
        redis_conn_id="redis_default",
        poll_interval=30,
    )
    assert isinstance(trigger, AwaitMessageTrigger)

    classpath, kwargs = trigger.serialize()
    assert classpath == "airflow.providers.redis.triggers.redis_await_message.AwaitMessageTrigger"
    assert kwargs == dict(
        channels=["test_channel"],
        redis_conn_id="redis_default",
        poll_interval=30,
    )
test
1
{"function_name": "test_trigger_serialization", "class_name": "TestAwaitMessageTrigger", "qualname": "TestAwaitMessageTrigger.test_trigger_serialization", "file_path": "providers/redis/tests/unit/redis/triggers/test_redis_await_message.py", "repo_id": "apache/airflow", "loc": 17, "tested_modules": ["__future__", "airflow.providers.redis.triggers.redis_await_message"], "has_docstring": false, "runnable_level": "project_runnable"}
infiniflow/ragflow:common/data_source/utils.py:make_paginated_slack_api_call
# Context: from collections.abc import Callable, Generator, Iterator, Mapping, Sequence from typing import IO, Any, Generic, Iterable, Optional, Protocol, TypeVar, cast from slack_sdk.web import SlackResponse def datetime_from_string(datetime_string: str) -> datetime: ... def is_valid_image_type(mime_type: str) -> bool: ... def _handle_http_error(e: requests.HTTPError, attempt: int) -> int: ... def update_param_in_path(path: str, param: str, value: str) -> str: ... def build_confluence_document_id(base_url: str, content_url: str, is_cloud: bool) -> str: ... def get_single_param_from_url(url: str, param: str) -> str | None: ... def get_start_param_from_url(url: str) -> int: ... def wrap_request_to_handle_ratelimiting(request_fn: R, default_wait_time_sec: int, max_waits: int) -> R: ... class _RateLimitedRequest: ... def create_s3_client(bucket_type: BlobType, credentials: dict[str, Any], european_residency: bool) -> S3Client: ... def detect_bucket_region(s3_client: S3Client, bucket_name: str) -> str | None: ... def download_object(s3_client: S3Client, bucket_name: str, key: str, size_threshold: int | None) -> bytes | None: ... def read_stream_with_limit(body: Any, key: str, size_threshold: int) -> bytes | None: ... def _extract_onyx_metadata(line: str) -> dict | None: ... def read_text_file(file: IO, encoding: str, errors: str, ignore_onyx_metadata: bool) -> tuple[str, dict]: ... def get_blob_link(bucket_type: BlobType, s3_client: S3Client, bucket_name: str, key: str, bucket_region: str | None) -> str: ... def extract_size_bytes(obj: Mapping[str, Any]) -> int | None: ... def get_file_ext(file_name: str) -> str: ... def is_accepted_file_ext(file_ext: str, extension_type: OnyxExtensionType) -> bool: ... def detect_encoding(file: IO[bytes]) -> str: ... def get_markitdown_converter(): ... def to_bytesio(stream: IO[bytes]) -> BytesIO: ... def get_base_url(token: str) -> str: ... def get_message_link(event: dict, client: WebClient, channel_id: str) -> str: ... def make_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> SlackResponse: ... def _make_slack_api_call_paginated(call: Callable[..., SlackResponse]) -> Callable[..., Generator[dict[str, Any], None, None]]: ... def is_atlassian_date_error(e: Exception) -> bool: ... def expert_info_from_slack_id(user_id: str | None, client: WebClient, user_cache: dict[str, BasicExpertInfo | None]) -> BasicExpertInfo | None: ... class SlackTextCleaner: ... def is_mail_service_disabled_error(error: HttpError) -> bool: ... def build_time_range_query(time_range_start: SecondsSinceUnixEpoch | None, time_range_end: SecondsSinceUnixEpoch | None) -> str | None: ... def clean_email_and_extract_name(email: str) -> tuple[str, str | None]: ... def get_message_body(payload: dict[str, Any]) -> str: ... def time_str_to_utc(time_str: str): ... def gmail_time_str_to_utc(time_str: str): ... def batch_generator(items: Iterable[T], batch_size: int, pre_batch_yield: Callable[[list[T]], None] | None) -> Generator[list[T], None, None]: ... def fetch_notion_data(url: str, headers: dict[str, str], method: str, json_data: Optional[dict]) -> dict[str, Any]: ... def properties_to_str(properties: dict[str, Any]) -> str: ... def filter_pages_by_time(pages: list[dict[str, Any]], start: float, end: float, filter_field: str) -> list[dict[str, Any]]: ... def _load_all_docs(connector: CheckpointedConnector[CT], load: LoadFunction) -> list[Document]: ... 
def load_all_docs_from_checkpoint_connector(connector: CheckpointedConnector[CT], start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> list[Document]: ... def is_atlassian_cloud_url(url: str) -> bool: ... def get_cloudId(base_url: str) -> str: ... def scoped_url(url: str, product: str) -> str: ... def process_confluence_user_profiles_override(confluence_user_email_override: list[dict[str, str]]) -> list[ConfluenceUser]: ... def confluence_refresh_tokens(client_id: str, client_secret: str, cloud_id: str, refresh_token: str) -> dict[str, Any]: ... class TimeoutThread(threading.Thread, Generic[R]): ... def run_with_timeout(timeout: float, func: Callable[..., R], *args, **kwargs) -> R: ... def validate_attachment_filetype(attachment: dict[str, Any]) -> bool: ... class CallableProtocol(Protocol): ... def run_functions_tuples_in_parallel(functions_with_args: Sequence[tuple[CallableProtocol, tuple[Any, ...]]], allow_failures: bool, max_workers: int | None) -> list[Any]: ... def _next_or_none(ind: int, gen: Iterator[R]) -> tuple[int, R | None]: ... def parallel_yield(gens: list[Iterator[R]], max_workers: int) -> Iterator[R]: ... def sanitize_filename(name: str, extension: str) -> str: ... class _RateLimitDecorator: ... def retry_builder(tries: int, delay: float, max_delay: float | None, backoff: float, jitter: tuple[float, float] | float, exceptions: type[Exception] | tuple[type[Exception], ...]) -> Callable[[F], F]: ... # Task: Write a Python function `make_paginated_slack_api_call` to make paginated Slack API call. Parameters: call: Callable[..., SlackResponse] Returns: Generator[dict[str, Any], None, None]
def make_paginated_slack_api_call(
    call: Callable[..., SlackResponse], **kwargs: Any
) -> Generator[dict[str, Any], None, None]:
    """Make paginated Slack API call"""
    return _make_slack_api_call_paginated(call)(**kwargs)
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "make_paginated_slack_api_call", "class_name": null, "qualname": "make_paginated_slack_api_call", "file_path": "common/data_source/utils.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py:test_serve_deployment_invalid_method
# Context: import pytest from ray.llm._internal.batch.stages.serve_deployment_stage import ( ServeDeploymentStageUDF, ) from ray.serve.llm.openai_api_models import ChatCompletionRequest, CompletionRequest def mock_serve_deployment_handle(): ... async def test_serve_deployment_udf_methods(mock_serve_deployment_handle, method, test_data): ... async def test_serve_deployment_missing_dtype(mock_serve_deployment_handle, dtype_mapping): ... async def test_serve_udf_default_raises_on_error(mock_serve_deployment_handle): ... async def test_serve_udf_continue_on_error_yields_error_row(mock_serve_deployment_handle): ... async def test_serve_udf_mixed_success_and_error(mock_serve_deployment_handle): ... async def test_serve_udf_fatal_errors_always_propagate(mock_serve_deployment_handle, fatal_error): ... async def test_serve_udf_unknown_errors_propagate(mock_serve_deployment_handle): ... async def test_serve_udf_success_with_continue_on_error_includes_none_error(mock_serve_deployment_handle): ... # Task: Write a Python test function `test_serve_deployment_invalid_method` to test that invalid method raises error at runtime. Module under test: ray.exceptions, ray.llm._internal.batch.stages.serve_deployment_stage, ray.serve._private.common
async def test_serve_deployment_invalid_method(mock_serve_deployment_handle):
    """Test that invalid method raises error at runtime."""
    # Set up the mock to simulate a method that doesn't exist
    mock_serve_deployment_handle.invalid_method = None

    udf = ServeDeploymentStageUDF(
        data_column="__data",
        expected_input_keys=["method", "request_kwargs"],
        deployment_name="test_deployment",
        app_name="test_app",
        dtype_mapping={
            "CompletionRequest": CompletionRequest,
        },
    )

    batch = {
        "__data": [
            {
                "method": "invalid_method",
                "dtype": "CompletionRequest",
                "request_kwargs": {"prompt": "Hello", "temperature": 0.7},
            }
        ]
    }

    with pytest.raises(
        ValueError, match="Method invalid_method not found in the serve deployment."
    ):
        async for _ in udf(batch):
            pass
test
0
{"function_name": "test_serve_deployment_invalid_method", "class_name": null, "qualname": "test_serve_deployment_invalid_method", "file_path": "python/ray/llm/tests/batch/gpu/stages/test_serve_deployment_stage.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["ray.exceptions", "ray.llm._internal.batch.stages.serve_deployment_stage", "ray.serve._private.common", "ray.serve.exceptions", "ray.serve.llm.openai_api_models"], "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/util/data_batch_conversion.py:_unwrap_ndarray_object_type_if_needed
# Context: import numpy as np def _lazy_import_pandas(): ... class BatchFormat(str, Enum): ... def _convert_batch_type_to_pandas(data: DataBatchType, cast_tensor_columns: bool) -> 'pd.DataFrame': ... def _convert_pandas_to_batch_type(data: 'pd.DataFrame', type: BatchFormat, cast_tensor_columns: bool) -> DataBatchType: ... def _convert_batch_type_to_numpy(data: DataBatchType) -> Union[np.ndarray, Dict[str, np.ndarray]]: ... def _ndarray_to_column(arr: np.ndarray) -> Union['pd.Series', List[np.ndarray]]: ... def _cast_ndarray_columns_to_tensor_extension(df: 'pd.DataFrame') -> 'pd.DataFrame': ... def _cast_tensor_columns_to_ndarrays(df: 'pd.DataFrame') -> 'pd.DataFrame': ... # Task: Write a Python function `_unwrap_ndarray_object_type_if_needed` to unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single contiguous ndarray, if needed/possible. Parameters: arr: np.ndarray Returns: np.ndarray
def _unwrap_ndarray_object_type_if_needed(arr: np.ndarray) -> np.ndarray:
    """Unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a
    single contiguous ndarray, if needed/possible.
    """
    if arr.dtype.type is np.object_:
        try:
            # Try to convert the NumPy ndarray to a non-object dtype.
            arr = np.array([np.asarray(v) for v in arr])
        except Exception:
            # This may fail if the subndarrays are of heterogeneous shape
            pass
    return arr
function_simple
0
{"cognitive_complexity": 2, "loc": 12, "code_loc": 6, "docstring_loc": 3, "function_name": "_unwrap_ndarray_object_type_if_needed", "class_name": null, "qualname": "_unwrap_ndarray_object_type_if_needed", "file_path": "python/ray/data/util/data_batch_conversion.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
huggingface/transformers:src/transformers/masking_utils.py:sliding_window_causal_mask_function
# Context: from collections.abc import Callable def and_masks(*mask_functions) -> Callable: ... def or_masks(*mask_functions) -> Callable: ... def causal_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: ... def bidirectional_mask_function(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: ... def sliding_window_overlay(sliding_window: int) -> Callable: ... def chunked_overlay(chunk_size: int, left_padding: torch.Tensor) -> Callable: ... def sliding_window_bidirectional_overlay(sliding_window: int) -> Callable: ... def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable: ... def chunked_causal_mask_function(chunk_size: int, left_padding: torch.Tensor) -> Callable: ... def padding_mask_function(padding_mask: torch.Tensor) -> Callable: ... def packed_sequence_mask_function(packed_sequence_mask: torch.Tensor) -> Callable: ... def add_offsets_to_mask_function(mask_function: Callable, q_offset: int, kv_offset: int) -> Callable: ... def prepare_padding_mask(attention_mask: torch.Tensor | None, kv_length: int, kv_offset: int) -> torch.Tensor | None: ... def _can_skip_causal_mask_xpu(padding_mask: torch.Tensor | None, query_length: int, kv_length: int, local_attention_size: int | None) -> bool: ... def _ignore_causal_mask_sdpa(padding_mask: torch.Tensor | None, query_length: int, kv_length: int, kv_offset: int, local_attention_size: int | None) -> bool: ... def _can_skip_bidirectional_mask_xpu(padding_mask: torch.Tensor | None, kv_length: int, local_attention_size: int | None) -> bool: ... def _ignore_bidirectional_mask_sdpa(padding_mask: torch.Tensor | None, kv_length: int, local_attention_size: int | None) -> bool: ... def _vmap_expansion_sdpa(mask_function: Callable) -> Callable: ... def _non_vmap_expansion_sdpa(batch_indices: torch.Tensor, head_indices: torch.Tensor, q_indices: torch.Tensor, kv_indices: torch.Tensor): ... def sdpa_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, local_size: int | None, allow_is_causal_skip: bool, allow_is_bidirectional_skip: bool, allow_torch_fix: bool, use_vmap: bool, **kwargs) -> torch.Tensor | None: ... def eager_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, dtype: torch.dtype, allow_is_bidirectional_skip: bool, use_vmap: bool, **kwargs) -> torch.Tensor: ... def flash_attention_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, **kwargs): ... def flex_attention_mask(batch_size: int, cache_position: torch.Tensor, kv_length: int, kv_offset: int, mask_function: Callable, attention_mask: torch.Tensor | None, **kwargs) -> BlockMask: ... class AttentionMaskInterface(GeneralInterface): ... def find_packed_sequence_indices(position_ids: torch.Tensor) -> torch.Tensor | None: ... def _preprocess_mask_arguments(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | BlockMask | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, layer_idx: int | None) -> tuple[bool, torch.Tensor | BlockMask | None, int, int]: ... 
def create_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_bidirectional_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, encoder_hidden_states: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_sliding_window_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_bidirectional_sliding_window_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_chunked_causal_mask(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None) -> torch.Tensor | BlockMask | None: ... def create_masks_for_generate(config: PreTrainedConfig, inputs_embeds: torch.Tensor, attention_mask: torch.Tensor | None, cache_position: torch.Tensor, past_key_values: Cache | None, position_ids: torch.Tensor | None, or_mask_function: Callable | None, and_mask_function: Callable | None, **kwargs): ... def get_style(style): ... def tensor_to_mask_visual(original_tensor: torch.Tensor, grid_size, style) -> str: ... class AttentionMask(torch.Tensor): ... # Task: Write a Python function `sliding_window_causal_mask_function` to return the mask_function used to create a sliding window mask. Parameters: sliding_window: int Returns: Callable
def sliding_window_causal_mask_function(sliding_window: int) -> Callable:
    """
    This returns the mask_function used to create a sliding window causal mask.
    """
    return and_masks(sliding_window_overlay(sliding_window), causal_mask_function)
function_simple
0
{"cognitive_complexity": 0, "loc": 5, "code_loc": 1, "docstring_loc": 3, "function_name": "sliding_window_causal_mask_function", "class_name": null, "qualname": "sliding_window_causal_mask_function", "file_path": "src/transformers/masking_utils.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py:SerperDevTool._make_api_request
# Context: import json import os from typing import Any, TypedDict import requests class KnowledgeGraph(TypedDict): ... class Sitelink(TypedDict): ... class OrganicResult(TypedDict): ... class PeopleAlsoAskResult(TypedDict): ... class RelatedSearchResult(TypedDict): ... class NewsResult(TypedDict): ... class SearchParameters(TypedDict): ... class FormattedResults(TypedDict): ... def _save_results_to_file(content: str) -> None: ... class SerperDevToolSchema(BaseModel): ... class SerperDevTool(BaseTool): def _get_search_url(self, search_type: str) -> str: ... def _process_knowledge_graph(kg: dict[str, Any]) -> KnowledgeGraph: ... def _process_organic_results(self, organic_results: list[dict[str, Any]]) -> list[OrganicResult]: ... def _process_people_also_ask(self, paa_results: list[dict[str, Any]]) -> list[PeopleAlsoAskResult]: ... def _process_related_searches(self, related_results: list[dict[str, Any]]) -> list[RelatedSearchResult]: ... def _process_news_results(self, news_results: list[dict[str, Any]]) -> list[NewsResult]: ... def _process_search_results(self, results: dict[str, Any], search_type: str) -> dict[str, Any]: ... def _run(self, **kwargs) -> FormattedResults: ... # Task: Write a Python method `_make_api_request` for the class `SerperDevTool` to make API request to Serper. Parameters: search_query: str, search_type: str Returns: dict[str, Any]
def _make_api_request(self, search_query: str, search_type: str) -> dict[str, Any]:
    """Make API request to Serper."""
    search_url = self._get_search_url(search_type)
    payload = {"q": search_query, "num": self.n_results}

    if self.country != "":
        payload["gl"] = self.country
    if self.location != "":
        payload["location"] = self.location
    if self.locale != "":
        payload["hl"] = self.locale

    headers = {
        "X-API-KEY": os.environ["SERPER_API_KEY"],
        "content-type": "application/json",
    }

    response = None
    try:
        response = requests.post(
            search_url, headers=headers, json=payload, timeout=10
        )
        response.raise_for_status()
        results = response.json()
        if not results:
            logger.error("Empty response from Serper API")
            raise ValueError("Empty response from Serper API")
        return results
    except requests.exceptions.RequestException as e:
        error_msg = f"Error making request to Serper API: {e}"
        if response is not None and hasattr(response, "content"):
            error_msg += f"\nResponse content: {response.content.decode('utf-8', errors='replace')}"
        logger.error(error_msg)
        raise
    except json.JSONDecodeError as e:
        if response is not None and hasattr(response, "content"):
            logger.error(f"Error decoding JSON response: {e}")
            logger.error(
                f"Response content: {response.content.decode('utf-8', errors='replace')}"
            )
        else:
            logger.error(
                f"Error decoding JSON response: {e} (No response content available)"
            )
        raise
function_complex
0
{"cognitive_complexity": 14, "loc": 45, "code_loc": 40, "docstring_loc": 1, "function_name": "_make_api_request", "class_name": "SerperDevTool", "qualname": "SerperDevTool._make_api_request", "file_path": "lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:ci/ray_ci/test_ray_image.py:TestValidateInvalid.test_invalid_platform_for_ray_llm
# Context: import pytest from ci.ray_ci.ray_image import IMAGE_TYPE_CONFIG, RayImage, RayImageError class TestWandaImageName: ... class TestArchSuffix: ... class TestRepo: ... class TestVariationSuffix: ... class TestValidateValid: ... class TestImageTypeConfig: ... class TestValidateInvalid: def test_unknown_image_type(self): ... def test_invalid_python_for_ray_llm(self): ... def test_invalid_platform_for_ray(self): ... def test_invalid_architecture_for_ray_llm(self): ... # Task: Write a Python test method `test_invalid_platform_for_ray_llm` in test class `TestValidateInvalid` to verify the behavior of `invalid_platform_for_ray_llm`. Module under test: ci.ray_ci.configs, ci.ray_ci.docker_container, ci.ray_ci.ray_image
def test_invalid_platform_for_ray_llm(self):
    with pytest.raises(RayImageError, match="Invalid platform cpu for ray-llm"):
        RayImage("ray-llm", "3.11", "cpu").validate()
test
0
{"function_name": "test_invalid_platform_for_ray_llm", "class_name": "TestValidateInvalid", "qualname": "TestValidateInvalid.test_invalid_platform_for_ray_llm", "file_path": "ci/ray_ci/test_ray_image.py", "repo_id": "ray-project/ray", "loc": 3, "tested_modules": ["ci.ray_ci.configs", "ci.ray_ci.docker_container", "ci.ray_ci.ray_image"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/vaultgemma/test_modeling_vaultgemma.py:VaultGemmaIntegrationTest.test_export_static_cache
# Context: import pytest from packaging import version from transformers import ( AutoModelForCausalLM, AutoTokenizer, DynamicCache, is_torch_available, pipeline, ) from transformers.generation.configuration_utils import GenerationConfig from transformers.testing_utils import ( Expectations, cleanup, is_flash_attn_2_available, is_kernels_available, is_torch_xpu_available, require_torch, require_torch_accelerator, slow, torch_device, ) import torch from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM class VaultGemmaModelTester(CausalLMModelTester): ... class VaultGemmaModelTest(CausalLMModelTest, unittest.TestCase): ... class VaultGemmaIntegrationTest(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] def setUp(self): ... def tearDown(self): ... def test_model_bf16(self): ... def test_model_pipeline_bf16(self): ... def test_generation_beyond_sliding_window(self, attn_implementation: str): ... def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str): ... # Task: Write a Python test method `test_export_static_cache` in test class `VaultGemmaIntegrationTest` to verify the behavior of `export_static_cache`. Module under test: packaging, parameterized, transformers
def test_export_static_cache(self):
    if version.parse(torch.__version__) < version.parse("2.5.0"):
        self.skipTest(reason="This test requires torch >= 2.5 to run.")

    from transformers.integrations.executorch import (
        TorchExportableModuleWithStaticCache,
    )

    model_id = "google/vaultgemma-1b"
    tokenizer = AutoTokenizer.from_pretrained(model_id, pad_token="</s>", padding_side="right")
    EXPECTED_TEXT_COMPLETIONS = Expectations(
        {
            ("cuda", 8): ["Hello I am doing a project on a 1990 240sx. I have a 1"],
        }
    )
    EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation()
    max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
        "input_ids"
    ].shape[-1]

    # Load model
    device = "cpu"  # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
    dtype = torch.bfloat16
    cache_implementation = "static"
    attn_implementation = "sdpa"
    batch_size = 1
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map=device,
        dtype=dtype,
        attn_implementation=attn_implementation,
        generation_config=GenerationConfig(
            use_cache=True,
            cache_implementation=cache_implementation,
            max_length=max_generation_length,
            cache_config={
                "batch_size": batch_size,
                "max_cache_len": max_generation_length,
            },
        ),
    )

    prompts = ["Hello I am doing"]
    prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
    prompt_token_ids = prompt_tokens["input_ids"]
    max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]

    # Static Cache + export
    from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM

    exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
    exported_program = exportable_module.export(
        input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
        cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
    )
    ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
        exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
    )
    ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
test
0
{"function_name": "test_export_static_cache", "class_name": "VaultGemmaIntegrationTest", "qualname": "VaultGemmaIntegrationTest.test_export_static_cache", "file_path": "tests/models/vaultgemma/test_modeling_vaultgemma.py", "repo_id": "huggingface/transformers", "loc": 60, "tested_modules": ["packaging", "parameterized", "transformers", "transformers.cache_utils", "transformers.generation.configuration_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/models/molmo2.py:Molmo2VisionBlock:class_doc
Write a class-level docstring for `Molmo2VisionBlock` (inherits from nn.Module) which has methods: `__init__`, `forward`.
Residual attention block used in Vision Transformer.
documentation
1
{"doc_type": "class", "class_name": "Molmo2VisionBlock", "file_path": "vllm/model_executor/models/molmo2.py", "repo_id": "vllm-project/vllm", "char_length": 52, "methods": ["__init__", "forward"]}
ray-project/ray:release/train_tests/benchmark/runner.py:TrainLoopRunner._train_epoch
# Context: import pprint class VanillaTorchRunner(TrainLoopRunner): ... class TrainLoopRunner: def __init__(self, factory: BenchmarkFactory): self.factory = factory self.benchmark_config = factory.benchmark_config self._setup() # Training progress state. self._train_batch_idx: int = 0 self._train_epoch_idx: int = 0 self._global_rows_processed_this_epoch: int = 0 # Performance metrics self._metrics = collections.defaultdict(lambda: Timer()) checkpoint = ray.train.get_checkpoint() if checkpoint: self._restore_from_checkpoint(checkpoint) def _setup(self): ... def _cleanup(self): ... def _train_step(self, train_dataloader): ... def _validate_step(self, val_dataloader): ... def _save_training_state(self, local_dir: str): ... def _load_training_state(self, local_dir: str): ... def _restore_from_checkpoint(self, checkpoint: ray.train.Checkpoint): ... def _wrap_dataloader(self, dataloader, train: bool): ... def _num_batches_to_skip(self) -> int: ... def _validate_epoch(self) -> Dict[str, float]: ... def _should_checkpoint_during_epoch(self) -> bool: ... def _should_validate_during_epoch(self) -> bool: ... def _should_log_metrics(self) -> bool: ... def _validate(self) -> Dict[str, float]: ... def _checkpoint(self, metrics: Optional[Dict[str, float]]): ... def _load_checkpoint(self, local_dir: str): ... def _save_checkpoint(self, local_dir: str): ... def _report_checkpoint(self, metrics, checkpoint): ... def run(self): ... def get_metrics(self, dataset_creation_time: float) -> Dict[str, float]: ... # Task: Write a Python method `_train_epoch` for the class `TrainLoopRunner` to let subclasses override the entire `_train_epoch` method for more training logic customization.
def _train_epoch(self):
    """Subclasses can override the entire `_train_epoch` method for more training
    logic customization."""
    if ray.train.get_context().get_world_rank() == 0:
        logger.info(f"Training starting @ epoch={self._train_epoch_idx}")

    train_dataloader = self.factory.get_train_dataloader()
    train_dataloader = self._wrap_dataloader(train_dataloader, train=True)

    # Skip through batches if we restored to a middle of the epoch.
    # TODO: Compare this baseline to the data checkpointing approach once we have it.
    if self._num_batches_to_skip:
        if ray.train.get_context().get_world_rank() == 0:
            logger.info(f"Skipping {self._num_batches_to_skip} batches...")

        for _ in range(self._num_batches_to_skip):
            with self._metrics["train/iter_skip_batch"].timer():
                next(train_dataloader)

    for batch in train_dataloader:
        with self._metrics["train/step"].timer():
            if not self.benchmark_config.skip_train_step:
                self._train_step(batch)

        # TODO: This is slightly off if the last batch is a partial batch (if drop_last=False)
        global_batch_size = (
            self.benchmark_config.dataloader_config.train_batch_size
            * ray.train.get_context().get_world_size()
        )
        self._metrics["train/rows_processed"].add(global_batch_size)
        self._global_rows_processed_this_epoch += global_batch_size

        if self._should_checkpoint_during_epoch():
            self._checkpoint()

        if self._should_validate_during_epoch():
            validation_metrics = self._validate()
            self._checkpoint(validation_metrics)

        if self._should_log_metrics():
            logger.info(pprint.pformat(self.get_metrics(), indent=2))

    self._train_epoch_idx += 1
    self._train_batch_idx = 0
    self._global_rows_processed_this_epoch = 0
function_complex
0
{"cognitive_complexity": 16, "loc": 46, "code_loc": 30, "docstring_loc": 2, "function_name": "_train_epoch", "class_name": "TrainLoopRunner", "qualname": "TrainLoopRunner._train_epoch", "file_path": "release/train_tests/benchmark/runner.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
666ghj/BettaFish:MediaEngine/state/state.py:State.save_to_file
# Context: class Search: ... class Research: ... class Paragraph: ... class State: def add_paragraph(self, title: str, content: str) -> int: ... def get_paragraph(self, index: int) -> Optional[Paragraph]: ... def get_completed_paragraphs_count(self) -> int: ... def get_total_paragraphs_count(self) -> int: ... def is_all_paragraphs_completed(self) -> bool: ... def mark_completed(self): ... def update_timestamp(self): ... def get_progress_summary(self) -> Dict[str, Any]: ... def to_dict(self) -> Dict[str, Any]: ... def to_json(self, indent: int) -> str: ... def from_dict(cls, data: Dict[str, Any]) -> 'State': ... def from_json(cls, json_str: str) -> 'State': ... def load_from_file(cls, filepath: str) -> 'State': ... # Task: Write a Python method `save_to_file` for the class `State` to save the state to a file. Parameters: filepath: str
def save_to_file(self, filepath: str):
    """Save the state to a file."""
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(self.to_json())
function_simple
1
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "save_to_file", "class_name": "State", "qualname": "State.save_to_file", "file_path": "MediaEngine/state/state.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"}
langflow-ai/langflow:src/backend/tests/unit/utils/test_mcp_cleanup.py:TestTryTerminateMcpProcess.test_terminates_mcp_proxy_process
# Context: from unittest.mock import AsyncMock, MagicMock, patch from langflow.utils.mcp_cleanup import ( _kill_mcp_processes, _terminate_child_mcp_processes, _terminate_orphaned_mcp_processes, _try_terminate_mcp_process, cleanup_mcp_sessions, ) class TestCleanupMcpSessions: ... class TestKillMcpProcesses: ... class TestTerminateChildMcpProcesses: ... class TestTerminateOrphanedMcpProcesses: ... class TestMcpCleanupIntegration: ... class TestTryTerminateMcpProcess: async def test_terminates_mcp_server_process(self): ... async def test_skips_non_mcp_process(self): ... async def test_kills_process_on_timeout(self): ... async def test_handles_no_such_process(self): ... async def test_handles_access_denied(self): ... async def test_handles_zombie_process(self): ... async def test_handles_empty_cmdline(self): ... async def test_handles_none_cmdline(self): ... # Task: Write a Python test method `test_terminates_mcp_proxy_process` in test class `TestTryTerminateMcpProcess` to test termination of mcp-proxy process. Module under test: langflow.utils.mcp_cleanup
async def test_terminates_mcp_proxy_process(self):
    """Test termination of mcp-proxy process."""
    mock_psutil = MagicMock()
    mock_psutil.NoSuchProcess = Exception
    mock_psutil.AccessDenied = Exception
    mock_psutil.ZombieProcess = Exception
    mock_psutil.TimeoutExpired = Exception

    mock_proc = MagicMock()
    mock_proc.cmdline.return_value = ["mcp-proxy", "--port", "8080"]
    mock_proc.terminate = MagicMock()
    mock_proc.wait = MagicMock()

    result = await _try_terminate_mcp_process(mock_proc, mock_psutil)

    assert result is True
    mock_proc.terminate.assert_called_once()
test
1
{"function_name": "test_terminates_mcp_proxy_process", "class_name": "TestTryTerminateMcpProcess", "qualname": "TestTryTerminateMcpProcess.test_terminates_mcp_proxy_process", "file_path": "src/backend/tests/unit/utils/test_mcp_cleanup.py", "repo_id": "langflow-ai/langflow", "loc": 17, "tested_modules": ["langflow.utils.mcp_cleanup"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/anthropic/completion.py:AnthropicCompletion.call
# Context: import logging from typing import TYPE_CHECKING, Any, Final, Literal, TypeGuard, cast from pydantic import BaseModel from crewai.llms.base_llm import BaseLLM, llm_call_context from crewai.utilities.types import LLMMessage def _supports_native_structured_outputs(model: str) -> bool: ... def _is_pydantic_model_class(obj: Any) -> TypeGuard[type[BaseModel]]: ... def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool: ... class AnthropicThinkingConfig(BaseModel): ... class AnthropicCompletion(BaseLLM): def __init__( self, model: str = "claude-3-5-sonnet-20241022", api_key: str | None = None, base_url: str | None = None, timeout: float | None = None, max_retries: int = 2, temperature: float | None = None, max_tokens: int = 4096, # Required for Anthropic top_p: float | None = None, stop_sequences: list[str] | None = None, stream: bool = False, client_params: dict[str, Any] | None = None, interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None, thinking: AnthropicThinkingConfig | None = None, response_format: type[BaseModel] | None = None, **kwargs: Any, ): """Initialize Anthropic chat completion client. Args: model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022') api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var) base_url: Custom base URL for Anthropic API timeout: Request timeout in seconds max_retries: Maximum number of retries temperature: Sampling temperature (0-1) max_tokens: Maximum tokens in response (required for Anthropic) top_p: Nucleus sampling parameter stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop) stream: Enable streaming responses client_params: Additional parameters for the Anthropic client interceptor: HTTP interceptor for modifying requests/responses at transport level. response_format: Pydantic model for structured output. When provided, responses will be validated against this model schema. **kwargs: Additional parameters """ super().__init__( model=model, temperature=temperature, stop=stop_sequences or [], **kwargs ) # Client params self.interceptor = interceptor self.client_params = client_params self.base_url = base_url self.timeout = timeout self.max_retries = max_retries self.client = Anthropic(**self._get_client_params()) async_client_params = self._get_client_params() if self.interceptor: async_transport = AsyncHTTPTransport(interceptor=self.interceptor) async_http_client = httpx.AsyncClient(transport=async_transport) async_client_params["http_client"] = async_http_client self.async_client = AsyncAnthropic(**async_client_params) # Store completion parameters self.max_tokens = max_tokens self.top_p = top_p self.stream = stream self.stop_sequences = stop_sequences or [] self.thinking = thinking self.previous_thinking_blocks: list[ThinkingBlock] = [] self.response_format = response_format # Model-specific settings self.is_claude_3 = "claude-3" in model.lower() self.supports_tools = True def stop(self) -> list[str]: ... def stop(self, value: list[str] | str | None) -> None: ... def _get_client_params(self) -> dict[str, Any]: ... async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... 
def _prepare_completion_params(self, messages: list[LLMMessage], system_message: str | None, tools: list[dict[str, Any]] | None, available_functions: dict[str, Any] | None) -> dict[str, Any]: ... def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]: ... def _extract_thinking_block(self, content_block: Any) -> ThinkingBlock | dict[str, Any] | None: ... def _format_messages_for_anthropic(self, messages: str | list[LLMMessage]) -> tuple[list[LLMMessage], str | None]: ... def _handle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _handle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _execute_tools_and_collect_results(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> list[dict[str, Any]]: ... def _execute_first_tool(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> Any | None: ... def _handle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ... async def _ahandle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ... def supports_function_calling(self) -> bool: ... def supports_stop_words(self) -> bool: ... def get_context_window_size(self) -> int: ... def _extract_anthropic_token_usage(response: Message | BetaMessage) -> dict[str, Any]: ... def supports_multimodal(self) -> bool: ... def get_file_uploader(self) -> Any: ... # Task: Write a Python method `call` for the class `AnthropicCompletion` to call Anthropic messages API. Parameters: messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None Returns: str | Any
def call(
    self,
    messages: str | list[LLMMessage],
    tools: list[dict[str, Any]] | None = None,
    callbacks: list[Any] | None = None,
    available_functions: dict[str, Any] | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    response_model: type[BaseModel] | None = None,
) -> str | Any:
    """Call Anthropic messages API.

    Args:
        messages: Input messages for the chat completion
        tools: List of tool/function definitions
        callbacks: Callback functions (not used in native implementation)
        available_functions: Available functions for tool calling
        from_task: Task that initiated the call
        from_agent: Agent that initiated the call
        response_model: Pydantic model for structured output

    Returns:
        Chat completion response or tool call result
    """
    with llm_call_context():
        try:
            # Emit call started event
            self._emit_call_started_event(
                messages=messages,
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
            )

            # Format messages for Anthropic
            formatted_messages, system_message = (
                self._format_messages_for_anthropic(messages)
            )

            if not self._invoke_before_llm_call_hooks(
                formatted_messages, from_agent
            ):
                raise ValueError("LLM call blocked by before_llm_call hook")

            # Prepare completion parameters
            completion_params = self._prepare_completion_params(
                formatted_messages, system_message, tools, available_functions
            )

            effective_response_model = response_model or self.response_format

            # Handle streaming vs non-streaming
            if self.stream:
                return self._handle_streaming_completion(
                    completion_params,
                    available_functions,
                    from_task,
                    from_agent,
                    effective_response_model,
                )

            return self._handle_completion(
                completion_params,
                available_functions,
                from_task,
                from_agent,
                effective_response_model,
            )

        except Exception as e:
            error_msg = f"Anthropic API call failed: {e!s}"
            logging.error(error_msg)
            self._emit_call_failed_event(
                error=error_msg, from_task=from_task, from_agent=from_agent
            )
            raise
function_complex
0
{"cognitive_complexity": 8, "loc": 77, "code_loc": 43, "docstring_loc": 13, "function_name": "call", "class_name": "AnthropicCompletion", "qualname": "AnthropicCompletion.call", "file_path": "lib/crewai/src/crewai/llms/providers/anthropic/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py:DLinear.forward
# Context: import torch class moving_avg(nn.Module): ... class series_decomp(nn.Module): ... class DLinear(nn.Module): def __init__(self, configs: Dict[str, Any]): super().__init__() self.seq_len: int = configs["seq_len"] self.pred_len: int = configs["pred_len"] self.decompsition = series_decomp(kernel_size=KERNEL_SIZE) self.individual: bool = configs["individual"] self.channels: int = configs["enc_in"] if self.individual: self.Linear_Seasonal = nn.ModuleList() self.Linear_Trend = nn.ModuleList() for _ in range(self.channels): self.Linear_Seasonal.append(nn.Linear(self.seq_len, self.pred_len)) self.Linear_Trend.append(nn.Linear(self.seq_len, self.pred_len)) else: self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len) self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len) # Task: Write a Python method `forward` for the class `DLinear` to forward pass for the DLinear model. Parameters: x: torch.Tensor Returns: torch.Tensor
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass for the DLinear model.

    Args:
        x (torch.Tensor): Input tensor. Can be 2D [Batch, SeqLen]
            (interpreted as 1 channel) or 3D [Batch, SeqLen, Channels].

    Returns:
        torch.Tensor: Output tensor of shape [Batch, PredLen, Channels].
    """
    # DLinear model (and many time series models) expect input of shape:
    # (batch_size, sequence_length, num_input_features).
    # seasonal_init, trend_init shapes: [Batch, SeqLen, Channel].
    seasonal_init, trend_init = self.decompsition(x)
    # Permute to [Batch, Channel, SeqLen] for Linear layers.
    seasonal_init = seasonal_init.permute(0, 2, 1)
    trend_init = trend_init.permute(0, 2, 1)

    if self.individual:
        seasonal_output = torch.zeros(
            [seasonal_init.size(0), seasonal_init.size(1), self.pred_len],
            dtype=seasonal_init.dtype,
        ).to(seasonal_init.device)
        trend_output = torch.zeros(
            [trend_init.size(0), trend_init.size(1), self.pred_len],
            dtype=trend_init.dtype,
        ).to(trend_init.device)
        for i in range(self.channels):
            seasonal_output[:, i, :] = self.Linear_Seasonal[i](
                seasonal_init[:, i, :]
            )
            trend_output[:, i, :] = self.Linear_Trend[i](trend_init[:, i, :])
    else:
        # seasonal_init shape: [Batch, Channel, SeqLen].
        # Linear layer applies to the last dim (SeqLen).
        seasonal_output = self.Linear_Seasonal(
            seasonal_init
        )  # Output: [Batch, Channel, PredLen].
        trend_output = self.Linear_Trend(
            trend_init
        )  # Output: [Batch, Channel, PredLen].

    output_x = seasonal_output + trend_output  # Shape: [Batch, Channel, PredLen].
    return output_x.permute(0, 2, 1)  # Transform to [Batch, PredLen, Channel].
function_simple
0
{"cognitive_complexity": 4, "loc": 46, "code_loc": 26, "docstring_loc": 10, "function_name": "forward", "class_name": "DLinear", "qualname": "DLinear.forward", "file_path": "doc/source/ray-overview/examples/e2e-timeseries/e2e_timeseries/model.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"}