task_id stringlengths 15 15 | repo stringclasses 9 values | file_path stringlengths 17 49 | function_name stringlengths 4 33 | qualified_name stringlengths 4 35 | function_type stringclasses 2 values | class_name stringclasses 4 values | prompt stringlengths 422 16.4k | signature stringlengths 22 792 | docstring stringlengths 0 549 | canonical_solution stringlengths 106 1.36k | full_function stringlengths 129 1.75k | tests stringlengths 563 526k | setup stringclasses 9 values | metadata stringlengths 74 77 | validation stringlengths 36 72 | original_task_id stringlengths 15 15 | contamination_label stringclasses 2 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
repo_patch/0001 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | normalize_output_item | normalize_output_item | function | null | """
Job utilities for the /api/jobs endpoint.
Provides normalization and helper functions for job status tracking.
"""
from typing import Optional
from comfy_api.internal import prune_dict
class JobStatus:
    """String constants for the lifecycle states a job can be in."""
    PENDING = 'pending'
    IN_PROGRESS = 'in_progress'
    COMPLETED = 'completed'
    FAILED = 'failed'
    CANCELLED = 'cancelled'
    # Every known status, in display order; used as the default filter set.
    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]
# Media types that can be previewed in the frontend
PREVIEWABLE_MEDIA_TYPES = frozenset({'images', 'video', 'audio', '3d', 'text'})

# 3D file extensions for preview fallback (no dedicated media_type exists)
THREE_D_EXTENSIONS = frozenset({'.obj', '.fbx', '.gltf', '.glb', '.usdz'})


def has_3d_extension(filename: str) -> bool:
    """Return True if filename ends with a known 3D model extension (case-insensitive)."""
    lower = filename.lower()
    return any(lower.endswith(ext) for ext in THREE_D_EXTENSIONS)


def normalize_output_item(item):
    """Normalize a single output list item for the jobs API.

    Returns the normalized item, or None to exclude it.
    String items with 3D extensions become {filename, type, subfolder} dicts.

    Args:
        item: One entry from a node's output list (dict, str, None, or other).

    Returns:
        dict for usable file outputs, or None to drop the item.
    """
    # Was an unimplemented TODO stub; implemented per the documented contract.
    if item is None:
        return None
    if isinstance(item, str):
        # Preview3D-style nodes emit bare filename strings; synthesize a
        # file-output dict so the frontend can preview them.
        if has_3d_extension(item):
            return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
        # Non-3D strings are not file outputs; exclude them.
        return None
    if isinstance(item, dict):
        # Dict items are already in output-file form; pass through unchanged.
        return item
    # Anything else (ints, tuples, etc.) is not a normalizable output.
    return None
def normalize_outputs(outputs: dict) -> dict:
    """Normalize raw node outputs for the jobs API.

    Transforms string 3D filenames into file output dicts and removes
    None items. All other items (non-3D strings, dicts, etc.) are
    preserved as-is.
    """
    result = {}
    for node_id, node_outputs in outputs.items():
        # Malformed (non-dict) node entries pass through untouched.
        if not isinstance(node_outputs, dict):
            result[node_id] = node_outputs
            continue
        cleaned = {}
        for media_type, items in node_outputs.items():
            # 'animated' is a flag list, and non-list values carry no items.
            if media_type == 'animated' or not isinstance(items, list):
                cleaned[media_type] = items
            else:
                kept = []
                for entry in items:
                    if entry is None:
                        continue
                    normalized = normalize_output_item(entry)
                    # Keep the original entry when normalization excludes it,
                    # since non-3D strings etc. are preserved as-is here.
                    kept.append(entry if normalized is None else normalized)
                cleaned[media_type] = kept
        result[node_id] = cleaned
    return result
# Text preview truncation limit (1024 characters) to prevent preview_output bloat
TEXT_PREVIEW_MAX_LENGTH = 1024
def _create_text_preview(value: str) -> dict:
"""Create a text preview dict with optional truncation.
Returns:
dict with 'content' and optionally 'truncated' flag
"""
if len(value) <= TEXT_PREVIEW_MAX_LENGTH:
return {'content': value}
return {
'content': value[:TEXT_PREVIEW_MAX_LENGTH],
'truncated': True
}
def _extract_job_metadata(extra_data: dict) -> tuple[Optional[int], Optional[str]]:
"""Extract create_time and workflow_id from extra_data.
Returns:
tuple: (create_time, workflow_id)
"""
create_time = extra_data.get('create_time')
extra_pnginfo = extra_data.get('extra_pnginfo', {})
workflow_id = extra_pnginfo.get('workflow', {}).get('id')
return create_time, workflow_id
def is_previewable(media_type: str, item: dict) -> bool:
    """Check if an output item is previewable.

    Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
    and maintains backwards compatibility with existing behavior.

    Priority:
    1. media_type is 'images', 'video', 'audio', or '3d'
    2. format field starts with 'video/' or 'audio/'
    3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)
    """
    if media_type in PREVIEWABLE_MEDIA_TYPES:
        return True
    # Fall back to the MIME-type 'format' field; the truthiness guard keeps
    # an explicit None value from raising.
    mime = item.get('format', '')
    if mime and mime.startswith(('video/', 'audio/')):
        return True
    # Last resort: detect 3D assets by filename extension.
    name = item.get('filename', '').lower()
    return any(name.endswith(ext) for ext in THREE_D_EXTENSIONS)
def normalize_queue_item(item: tuple, status: str) -> dict:
    """Convert queue item tuple to unified job dict.

    Expects item with sensitive data already removed (5 elements).

    Args:
        item: (priority, prompt_id, prompt, extra_data, outputs_to_execute).
        status: JobStatus value to stamp on the resulting job.
    """
    number, prompt_id, _prompt, extra_data, _outputs = item
    create_time, workflow_id = _extract_job_metadata(extra_data)
    job = {
        'id': prompt_id,
        'status': status,
        'priority': number,
        'create_time': create_time,
        # Queue items have not executed yet, so there are no outputs.
        'outputs_count': 0,
        'workflow_id': workflow_id,
    }
    return prune_dict(job)
def normalize_history_item(prompt_id: str, history_item: dict, include_outputs: bool = False) -> dict:
    """Convert history item dict to unified job dict.

    History items have sensitive data already removed (prompt tuple has 5 elements).

    Args:
        prompt_id: ID of the prompt this history entry belongs to.
        history_item: Raw history dict with 'prompt', 'status', and 'outputs' keys.
        include_outputs: When True, also attach normalized outputs, the raw
            execution status, and the originating workflow to the result.

    Returns:
        Job dict with None-valued fields pruned.
    """
    prompt_tuple = history_item['prompt']
    priority, _, prompt, extra_data, _ = prompt_tuple
    create_time, workflow_id = _extract_job_metadata(extra_data)
    status_info = history_item.get('status', {})
    status_str = status_info.get('status_str') if status_info else None
    outputs = history_item.get('outputs', {})
    outputs_count, preview_output = get_outputs_summary(outputs)
    execution_error = None
    execution_start_time = None
    execution_end_time = None
    was_interrupted = False
    if status_info:
        # Scan execution messages for timing, errors, and interruption.
        # Messages are (event_name, event_data) pairs; malformed entries skipped.
        messages = status_info.get('messages', [])
        for entry in messages:
            if isinstance(entry, (list, tuple)) and len(entry) >= 2:
                event_name, event_data = entry[0], entry[1]
                if isinstance(event_data, dict):
                    if event_name == 'execution_start':
                        execution_start_time = event_data.get('timestamp')
                    elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'):
                        # Any terminal event marks the end of execution.
                        execution_end_time = event_data.get('timestamp')
                        if event_name == 'execution_error':
                            execution_error = event_data
                        elif event_name == 'execution_interrupted':
                            was_interrupted = True
    # Map history status_str onto JobStatus. Interrupted errors surface as
    # CANCELLED; unknown/missing statuses default to COMPLETED.
    if status_str == 'success':
        status = JobStatus.COMPLETED
    elif status_str == 'error':
        status = JobStatus.CANCELLED if was_interrupted else JobStatus.FAILED
    else:
        status = JobStatus.COMPLETED
    job = prune_dict({
        'id': prompt_id,
        'status': status,
        'priority': priority,
        'create_time': create_time,
        'execution_start_time': execution_start_time,
        'execution_end_time': execution_end_time,
        'execution_error': execution_error,
        'outputs_count': outputs_count,
        'preview_output': preview_output,
        'workflow_id': workflow_id,
    })
    if include_outputs:
        # Full-detail view: attach outputs, raw status, and workflow source.
        job['outputs'] = normalize_outputs(outputs)
        job['execution_status'] = status_info
        job['workflow'] = {
            'prompt': prompt,
            'extra_data': extra_data,
        }
    return job
def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:
    """
    Count outputs and find preview in a single pass.

    Returns (outputs_count, preview_output).

    Preview priority (matching frontend):
    1. type="output" with previewable media
    2. Any previewable media
    """
    count = 0
    preview_output = None
    fallback_preview = None
    for node_id, node_outputs in outputs.items():
        if not isinstance(node_outputs, dict):
            continue
        for media_type, items in node_outputs.items():
            # 'animated' is a boolean flag, not actual output items
            if media_type == 'animated' or not isinstance(items, list):
                continue
            for item in items:
                if not isinstance(item, dict):
                    # Handle text outputs (non-dict items like strings or tuples)
                    normalized = normalize_output_item(item)
                    if normalized is None:
                        # Not a 3D file string — check for text preview
                        if media_type == 'text':
                            count += 1
                            if preview_output is None:
                                # Text items may be (value, ...) tuples or plain values.
                                if isinstance(item, tuple):
                                    text_value = item[0] if item else ''
                                else:
                                    text_value = str(item)
                                text_preview = _create_text_preview(text_value)
                                enriched = {
                                    **text_preview,
                                    'nodeId': node_id,
                                    'mediaType': media_type
                                }
                                # Text only ever serves as a fallback preview.
                                if fallback_preview is None:
                                    fallback_preview = enriched
                        # Non-text, non-3D items are neither counted nor previewed.
                        continue
                    # normalize_output_item returned a dict (e.g. 3D file)
                    item = normalized
                count += 1
                if preview_output is not None:
                    continue
                if is_previewable(media_type, item):
                    enriched = {
                        **item,
                        'nodeId': node_id,
                    }
                    # Preserve an item-supplied mediaType (e.g. '3d' from
                    # normalize_output_item) over the containing key.
                    if 'mediaType' not in item:
                        enriched['mediaType'] = media_type
                    if item.get('type') == 'output':
                        preview_output = enriched
                    elif fallback_preview is None:
                        fallback_preview = enriched
    return count, preview_output or fallback_preview
def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:
    """Return jobs sorted by the requested field and direction.

    'execution_duration' sorts by end-start (missing/None times count as 0);
    any other sort_by falls back to create_time.
    """
    descending = sort_order == 'desc'
    if sort_by == 'execution_duration':
        def sort_key(job):
            begin = job.get('execution_start_time', 0)
            finish = job.get('execution_end_time', 0)
            # Treat incomplete or missing timing as zero duration.
            if begin and finish:
                return finish - begin
            return 0
    else:
        def sort_key(job):
            return job.get('create_time', 0)
    return sorted(jobs, key=sort_key, reverse=descending)
def get_job(prompt_id: str, running: list, queued: list, history: dict) -> Optional[dict]:
    """
    Get a single job by prompt_id from history or queue.

    Args:
        prompt_id: The prompt ID to look up
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id

    Returns:
        Job dict with full details, or None if not found
    """
    # History wins: it carries full outputs and execution detail.
    if prompt_id in history:
        return normalize_history_item(prompt_id, history[prompt_id], include_outputs=True)
    # Otherwise scan the live queues; entry[1] is the prompt_id slot.
    for queue_items, status in ((running, JobStatus.IN_PROGRESS), (queued, JobStatus.PENDING)):
        for entry in queue_items:
            if entry[1] == prompt_id:
                return normalize_queue_item(entry, status)
    return None
def get_all_jobs(
    running: list,
    queued: list,
    history: dict,
    status_filter: Optional[list[str]] = None,
    workflow_id: Optional[str] = None,
    sort_by: str = "created_at",
    sort_order: str = "desc",
    limit: Optional[int] = None,
    offset: int = 0
) -> tuple[list[dict], int]:
    """
    Get all jobs (running, pending, completed) with filtering and sorting.

    Args:
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id
        status_filter: List of statuses to include (from JobStatus.ALL)
        workflow_id: Filter by workflow ID
        sort_by: Field to sort by ('created_at', 'execution_duration')
        sort_order: 'asc' or 'desc'
        limit: Maximum number of items to return
        offset: Number of items to skip

    Returns:
        tuple: (jobs_list, total_count) — total_count is measured after
        filtering but before pagination.
    """
    # No filter means every status is wanted.
    wanted = set(status_filter) if status_filter is not None else set(JobStatus.ALL)
    jobs: list[dict] = []
    if JobStatus.IN_PROGRESS in wanted:
        jobs.extend(normalize_queue_item(entry, JobStatus.IN_PROGRESS) for entry in running)
    if JobStatus.PENDING in wanted:
        jobs.extend(normalize_queue_item(entry, JobStatus.PENDING) for entry in queued)
    # History items resolve to one of the terminal statuses; only normalize
    # them when at least one terminal status was requested.
    terminal = {JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED} & wanted
    if terminal:
        for prompt_id, history_item in history.items():
            job = normalize_history_item(prompt_id, history_item)
            if job.get('status') in terminal:
                jobs.append(job)
    if workflow_id:
        jobs = [job for job in jobs if job.get('workflow_id') == workflow_id]
    jobs = apply_sorting(jobs, sort_by, sort_order)
    total_count = len(jobs)
    # Paginate after sorting so offset/limit are deterministic.
    if offset > 0:
        jobs = jobs[offset:]
    if limit is not None:
        jobs = jobs[:limit]
    return (jobs, total_count)
"""Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts.
""" | Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts. | if item is None:
return None
if isinstance(item, str):
if has_3d_extension(item):
return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
return None
if isinstance(item, dict):
return item
return None | def normalize_output_item(item):
"""Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts.
"""
if item is None:
return None
if isinstance(item, str):
if has_3d_extension(item):
return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
return None
if isinstance(item, dict):
return item
return None | [{"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_none_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def 
test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with 
type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 
def normalize_output_item(item):
    """Normalize a single output list item for the jobs API.

    Dict items (standard file-output records) pass through unchanged, by
    identity. String items that carry a recognized 3D asset extension
    (e.g. Preview3D node results like 'preview3d_abc123.glb') are
    synthesized into file-output dicts so the frontend can preview them.
    Everything else — None, numbers, booleans, non-file strings such as
    serialized camera info — is excluded.

    Args:
        item: A raw entry from a node's output list (dict, str, None, ...).

    Returns:
        The normalized item (a dict), or None to exclude it from results.
    """
    if isinstance(item, dict):
        # Already a structured file output; keep it as-is.
        return item
    if isinstance(item, str):
        # Case-insensitive 3D-extension check; mirrors THREE_D_EXTENSIONS.
        if item.lower().endswith(('.obj', '.fbx', '.gltf', '.glb', '.usdz')):
            return {
                'filename': item,
                'type': 'output',
                'subfolder': '',
                'mediaType': '3d',
            }
        # Non-3D strings (e.g. 'data.json', camera info) are not previewable files.
        return None
    # None and any other type (int, bool, list, ...) are excluded.
    return None
'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 
10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job 
= normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': 
{}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') 
is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_string_non_3d_extension_returns_none -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": 
"TestNormalizeOutputItem.test_string_no_extension_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case 
insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': 
[\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview 
= get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n 
assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 
'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed 
history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def 
test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = 
normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def 
test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_string_no_extension_returns_none -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_dict_passes_through", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 
'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = 
get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def 
test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n 
assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 
'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 
'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 
'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = 
job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': 
[{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_dict_passes_through -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_other_types_return_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert 
JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit 
tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as 
preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and 
non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == 
['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 
1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = 
normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': 
{}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') 
is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_other_types_return_none -xvs"}] | {"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": 
"dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"} | {"body_lines": 9, "file_lines": 390, "has_docstring": true, "num_tests": 6} | {"status": "passed", "tests_run": 6} | repo_patch/0001 | file_overlap |
repo_patch/0002 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | normalize_queue_item | normalize_queue_item | function | null | """
Job utilities for the /api/jobs endpoint.
Provides normalization and helper functions for job status tracking.
"""
from typing import Optional
from comfy_api.internal import prune_dict
class JobStatus:
"""Job status constants."""
PENDING = 'pending'
IN_PROGRESS = 'in_progress'
COMPLETED = 'completed'
FAILED = 'failed'
CANCELLED = 'cancelled'
ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]
# Media types that can be previewed in the frontend
PREVIEWABLE_MEDIA_TYPES = frozenset({'images', 'video', 'audio', '3d', 'text'})
# 3D file extensions for preview fallback (no dedicated media_type exists)
THREE_D_EXTENSIONS = frozenset({'.obj', '.fbx', '.gltf', '.glb', '.usdz'})
def has_3d_extension(filename: str) -> bool:
lower = filename.lower()
return any(lower.endswith(ext) for ext in THREE_D_EXTENSIONS)
def normalize_output_item(item):
"""Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts.
"""
if item is None:
return None
if isinstance(item, str):
if has_3d_extension(item):
return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
return None
if isinstance(item, dict):
return item
return None
def normalize_outputs(outputs: dict) -> dict:
"""Normalize raw node outputs for the jobs API.
Transforms string 3D filenames into file output dicts and removes
None items. All other items (non-3D strings, dicts, etc.) are
preserved as-is.
"""
normalized = {}
for node_id, node_outputs in outputs.items():
if not isinstance(node_outputs, dict):
normalized[node_id] = node_outputs
continue
normalized_node = {}
for media_type, items in node_outputs.items():
if media_type == 'animated' or not isinstance(items, list):
normalized_node[media_type] = items
continue
normalized_items = []
for item in items:
if item is None:
continue
norm = normalize_output_item(item)
normalized_items.append(norm if norm is not None else item)
normalized_node[media_type] = normalized_items
normalized[node_id] = normalized_node
return normalized
# Text preview truncation limit (1024 characters) to prevent preview_output bloat
TEXT_PREVIEW_MAX_LENGTH = 1024
def _create_text_preview(value: str) -> dict:
"""Create a text preview dict with optional truncation.
Returns:
dict with 'content' and optionally 'truncated' flag
"""
if len(value) <= TEXT_PREVIEW_MAX_LENGTH:
return {'content': value}
return {
'content': value[:TEXT_PREVIEW_MAX_LENGTH],
'truncated': True
}
def _extract_job_metadata(extra_data: dict) -> tuple[Optional[int], Optional[str]]:
"""Extract create_time and workflow_id from extra_data.
Returns:
tuple: (create_time, workflow_id)
"""
create_time = extra_data.get('create_time')
extra_pnginfo = extra_data.get('extra_pnginfo', {})
workflow_id = extra_pnginfo.get('workflow', {}).get('id')
return create_time, workflow_id
def is_previewable(media_type: str, item: dict) -> bool:
"""
Check if an output item is previewable.
Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
Maintains backwards compatibility with existing logic.
Priority:
1. media_type is 'images', 'video', 'audio', or '3d'
2. format field starts with 'video/' or 'audio/'
3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)
"""
if media_type in PREVIEWABLE_MEDIA_TYPES:
return True
# Check format field (MIME type).
# Maintains backwards compatibility with how custom node outputs are handled in the frontend.
fmt = item.get('format', '')
if fmt and (fmt.startswith('video/') or fmt.startswith('audio/')):
return True
# Check for 3D files by extension
filename = item.get('filename', '').lower()
if any(filename.endswith(ext) for ext in THREE_D_EXTENSIONS):
return True
return False
def normalize_queue_item(item: tuple, status: str) -> dict:
"""Convert queue item tuple to unified job dict.
Expects item with sensitive data already removed (5 elements).
"""
# TODO: Implement this function
def normalize_history_item(prompt_id: str, history_item: dict, include_outputs: bool = False) -> dict:
"""Convert history item dict to unified job dict.
History items have sensitive data already removed (prompt tuple has 5 elements).
"""
prompt_tuple = history_item['prompt']
priority, _, prompt, extra_data, _ = prompt_tuple
create_time, workflow_id = _extract_job_metadata(extra_data)
status_info = history_item.get('status', {})
status_str = status_info.get('status_str') if status_info else None
outputs = history_item.get('outputs', {})
outputs_count, preview_output = get_outputs_summary(outputs)
execution_error = None
execution_start_time = None
execution_end_time = None
was_interrupted = False
if status_info:
messages = status_info.get('messages', [])
for entry in messages:
if isinstance(entry, (list, tuple)) and len(entry) >= 2:
event_name, event_data = entry[0], entry[1]
if isinstance(event_data, dict):
if event_name == 'execution_start':
execution_start_time = event_data.get('timestamp')
elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'):
execution_end_time = event_data.get('timestamp')
if event_name == 'execution_error':
execution_error = event_data
elif event_name == 'execution_interrupted':
was_interrupted = True
if status_str == 'success':
status = JobStatus.COMPLETED
elif status_str == 'error':
status = JobStatus.CANCELLED if was_interrupted else JobStatus.FAILED
else:
status = JobStatus.COMPLETED
job = prune_dict({
'id': prompt_id,
'status': status,
'priority': priority,
'create_time': create_time,
'execution_start_time': execution_start_time,
'execution_end_time': execution_end_time,
'execution_error': execution_error,
'outputs_count': outputs_count,
'preview_output': preview_output,
'workflow_id': workflow_id,
})
if include_outputs:
job['outputs'] = normalize_outputs(outputs)
job['execution_status'] = status_info
job['workflow'] = {
'prompt': prompt,
'extra_data': extra_data,
}
return job
def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:
"""
Count outputs and find preview in a single pass.
Returns (outputs_count, preview_output).
Preview priority (matching frontend):
1. type="output" with previewable media
2. Any previewable media
"""
count = 0
preview_output = None
fallback_preview = None
for node_id, node_outputs in outputs.items():
if not isinstance(node_outputs, dict):
continue
for media_type, items in node_outputs.items():
# 'animated' is a boolean flag, not actual output items
if media_type == 'animated' or not isinstance(items, list):
continue
for item in items:
if not isinstance(item, dict):
# Handle text outputs (non-dict items like strings or tuples)
normalized = normalize_output_item(item)
if normalized is None:
# Not a 3D file string — check for text preview
if media_type == 'text':
count += 1
if preview_output is None:
if isinstance(item, tuple):
text_value = item[0] if item else ''
else:
text_value = str(item)
text_preview = _create_text_preview(text_value)
enriched = {
**text_preview,
'nodeId': node_id,
'mediaType': media_type
}
if fallback_preview is None:
fallback_preview = enriched
continue
# normalize_output_item returned a dict (e.g. 3D file)
item = normalized
count += 1
if preview_output is not None:
continue
if is_previewable(media_type, item):
enriched = {
**item,
'nodeId': node_id,
}
if 'mediaType' not in item:
enriched['mediaType'] = media_type
if item.get('type') == 'output':
preview_output = enriched
elif fallback_preview is None:
fallback_preview = enriched
return count, preview_output or fallback_preview
def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:
"""Sort jobs list by specified field and order."""
reverse = (sort_order == 'desc')
if sort_by == 'execution_duration':
def get_sort_key(job):
start = job.get('execution_start_time', 0)
end = job.get('execution_end_time', 0)
return end - start if end and start else 0
else:
def get_sort_key(job):
return job.get('create_time', 0)
return sorted(jobs, key=get_sort_key, reverse=reverse)
def get_job(prompt_id: str, running: list, queued: list, history: dict) -> Optional[dict]:
"""
Get a single job by prompt_id from history or queue.
Args:
prompt_id: The prompt ID to look up
running: List of currently running queue items
queued: List of pending queue items
history: Dict of history items keyed by prompt_id
Returns:
Job dict with full details, or None if not found
"""
if prompt_id in history:
return normalize_history_item(prompt_id, history[prompt_id], include_outputs=True)
for item in running:
if item[1] == prompt_id:
return normalize_queue_item(item, JobStatus.IN_PROGRESS)
for item in queued:
if item[1] == prompt_id:
return normalize_queue_item(item, JobStatus.PENDING)
return None
def get_all_jobs(
running: list,
queued: list,
history: dict,
status_filter: Optional[list[str]] = None,
workflow_id: Optional[str] = None,
sort_by: str = "created_at",
sort_order: str = "desc",
limit: Optional[int] = None,
offset: int = 0
) -> tuple[list[dict], int]:
"""
Get all jobs (running, pending, completed) with filtering and sorting.
Args:
running: List of currently running queue items
queued: List of pending queue items
history: Dict of history items keyed by prompt_id
status_filter: List of statuses to include (from JobStatus.ALL)
workflow_id: Filter by workflow ID
sort_by: Field to sort by ('created_at', 'execution_duration')
sort_order: 'asc' or 'desc'
limit: Maximum number of items to return
offset: Number of items to skip
Returns:
tuple: (jobs_list, total_count)
"""
jobs = []
if status_filter is None:
status_filter = JobStatus.ALL
if JobStatus.IN_PROGRESS in status_filter:
for item in running:
jobs.append(normalize_queue_item(item, JobStatus.IN_PROGRESS))
if JobStatus.PENDING in status_filter:
for item in queued:
jobs.append(normalize_queue_item(item, JobStatus.PENDING))
history_statuses = {JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED}
requested_history_statuses = history_statuses & set(status_filter)
if requested_history_statuses:
for prompt_id, history_item in history.items():
job = normalize_history_item(prompt_id, history_item)
if job.get('status') in requested_history_statuses:
jobs.append(job)
if workflow_id:
jobs = [j for j in jobs if j.get('workflow_id') == workflow_id]
jobs = apply_sorting(jobs, sort_by, sort_order)
total_count = len(jobs)
if offset > 0:
jobs = jobs[offset:]
if limit is not None:
jobs = jobs[:limit]
return (jobs, total_count) | def normalize_queue_item(item: tuple, status: str) -> dict:
"""Convert queue item tuple to unified job dict.
Expects item with sensitive data already removed (5 elements).
""" | Convert queue item tuple to unified job dict.
Expects item with sensitive data already removed (5 elements). | priority, prompt_id, _, extra_data, _ = item
create_time, workflow_id = _extract_job_metadata(extra_data)
return prune_dict({
'id': prompt_id,
'status': status,
'priority': priority,
'create_time': create_time,
'outputs_count': 0,
'workflow_id': workflow_id,
}) | def normalize_queue_item(item: tuple, status: str) -> dict:
"""Convert queue item tuple to unified job dict.
Expects item with sensitive data already removed (5 elements).
"""
priority, prompt_id, _, extra_data, _ = item
create_time, workflow_id = _extract_job_metadata(extra_data)
return prune_dict({
'id': prompt_id,
'status': status,
'priority': priority,
'create_time': create_time,
'outputs_count': 0,
'workflow_id': workflow_id,
}) | [{"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeQueueItem.test_basic_normalization", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n 
\"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for 
preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 
'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def 
test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 
'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n 
assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have 
execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 
'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = 
normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeQueueItem::test_basic_normalization -xvs"}] | {"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": "dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"} | {"body_lines": 10, "file_lines": 390, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0002 | file_overlap |
repo_patch/0003 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | is_previewable | is_previewable | function | null | """
Job utilities for the /api/jobs endpoint.
Provides normalization and helper functions for job status tracking.
"""
from typing import Optional
from comfy_api.internal import prune_dict
class JobStatus:
    """Job status constants used by the /api/jobs endpoint."""
    PENDING = 'pending'          # queued, waiting to run
    IN_PROGRESS = 'in_progress'  # currently executing
    COMPLETED = 'completed'      # finished successfully
    FAILED = 'failed'            # finished with an execution error
    CANCELLED = 'cancelled'      # interrupted before completing
    # Every recognized status; used as the default status filter in get_all_jobs().
    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]
# Media types the frontend is able to render as a preview.
PREVIEWABLE_MEDIA_TYPES = frozenset(('images', 'video', 'audio', '3d', 'text'))
# Extensions used to spot 3D assets (no dedicated media_type exists for them).
THREE_D_EXTENSIONS = frozenset(('.obj', '.fbx', '.gltf', '.glb', '.usdz'))
def has_3d_extension(filename: str) -> bool:
    """Return True when *filename* ends in a known 3D asset extension (case-insensitive)."""
    return filename.lower().endswith(tuple(THREE_D_EXTENSIONS))
def normalize_output_item(item):
    """Normalize a single output list item for the jobs API.

    Returns the normalized item, or None to exclude it.
    String items with 3D extensions become {filename, type, subfolder} dicts.
    """
    if isinstance(item, dict):
        # Already a file-output dict; pass it through untouched.
        return item
    if isinstance(item, str) and has_3d_extension(item):
        # Bare 3D filename (e.g. from a Preview3D node): synthesize a file dict.
        return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
    # None, non-3D strings, and any other type are excluded.
    return None
def normalize_outputs(outputs: dict) -> dict:
    """Normalize raw node outputs for the jobs API.

    Transforms string 3D filenames into file output dicts and removes
    None items. All other items (non-3D strings, dicts, etc.) are
    preserved as-is.
    """
    result = {}
    for node_id, node_outputs in outputs.items():
        if not isinstance(node_outputs, dict):
            # Unexpected shape — keep it verbatim rather than dropping data.
            result[node_id] = node_outputs
            continue
        cleaned = {}
        for media_type, items in node_outputs.items():
            # 'animated' is a flag list, and non-list values carry no file items.
            if media_type == 'animated' or not isinstance(items, list):
                cleaned[media_type] = items
                continue
            # Drop None entries; substitute the normalized form when one exists,
            # otherwise keep the original item unchanged.
            cleaned[media_type] = [
                norm if (norm := normalize_output_item(entry)) is not None else entry
                for entry in items
                if entry is not None
            ]
        result[node_id] = cleaned
    return result
# Text preview truncation limit (1024 characters) to prevent preview_output bloat
TEXT_PREVIEW_MAX_LENGTH = 1024
def _create_text_preview(value: str) -> dict:
"""Create a text preview dict with optional truncation.
Returns:
dict with 'content' and optionally 'truncated' flag
"""
if len(value) <= TEXT_PREVIEW_MAX_LENGTH:
return {'content': value}
return {
'content': value[:TEXT_PREVIEW_MAX_LENGTH],
'truncated': True
}
def _extract_job_metadata(extra_data: dict) -> tuple[Optional[int], Optional[str]]:
"""Extract create_time and workflow_id from extra_data.
Returns:
tuple: (create_time, workflow_id)
"""
create_time = extra_data.get('create_time')
extra_pnginfo = extra_data.get('extra_pnginfo', {})
workflow_id = extra_pnginfo.get('workflow', {}).get('id')
return create_time, workflow_id
def is_previewable(media_type: str, item: dict) -> bool:
    """
    Check if an output item is previewable.

    Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
    Maintains backwards compatibility with existing logic.

    Priority:
    1. media_type is 'images', 'video', 'audio', or '3d'
    2. format field starts with 'video/' or 'audio/'
    3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)

    Args:
        media_type: Key the item was stored under in the node's outputs.
        item: File-output dict (may carry 'format' and/or 'filename').

    Returns:
        True when the frontend can render a preview for this item.
    """
    # Was a `# TODO` stub: implemented per the documented priority above.
    if media_type in PREVIEWABLE_MEDIA_TYPES:
        return True
    # Custom nodes may only advertise a MIME type; honor video/audio formats
    # for backwards compatibility. `or ''` guards a None 'format' value.
    fmt = item.get('format') or ''
    if fmt.startswith(('video/', 'audio/')):
        return True
    # Fall back to extension sniffing so 3D assets preview even when stored
    # under a non-previewable media_type key.
    return has_3d_extension(item.get('filename', ''))
def normalize_queue_item(item: tuple, status: str) -> dict:
    """Convert queue item tuple to unified job dict.

    Expects item with sensitive data already removed (5 elements).

    Args:
        item: (priority, prompt_id, prompt, extra_data, outputs_to_execute).
        status: JobStatus value to report for this entry.

    Returns:
        Job dict pruned of None-valued fields.
    """
    number, prompt_id, _prompt, extra_data, _outputs = item
    created, wf_id = _extract_job_metadata(extra_data)
    # Queued items have not produced outputs yet, hence outputs_count == 0.
    fields = {
        'id': prompt_id,
        'status': status,
        'priority': number,
        'create_time': created,
        'outputs_count': 0,
        'workflow_id': wf_id,
    }
    return prune_dict(fields)
def normalize_history_item(prompt_id: str, history_item: dict, include_outputs: bool = False) -> dict:
    """Convert history item dict to unified job dict.

    History items have sensitive data already removed (prompt tuple has 5 elements).

    Args:
        prompt_id: ID of the prompt this history entry belongs to.
        history_item: Raw history entry with 'prompt', 'status', and 'outputs' keys.
        include_outputs: When True, also attach normalized outputs, raw execution
            status, and the original workflow to the returned job.

    Returns:
        Job dict (pruned of None values) with status, timing, and output summary.
    """
    prompt_tuple = history_item['prompt']
    # Tuple layout: (priority, prompt_id, prompt, extra_data, outputs_to_execute).
    priority, _, prompt, extra_data, _ = prompt_tuple
    create_time, workflow_id = _extract_job_metadata(extra_data)
    status_info = history_item.get('status', {})
    status_str = status_info.get('status_str') if status_info else None
    outputs = history_item.get('outputs', {})
    outputs_count, preview_output = get_outputs_summary(outputs)
    execution_error = None
    execution_start_time = None
    execution_end_time = None
    was_interrupted = False
    if status_info:
        # Walk the recorded execution messages to recover timing, error
        # details, and whether the run was interrupted.
        messages = status_info.get('messages', [])
        for entry in messages:
            if isinstance(entry, (list, tuple)) and len(entry) >= 2:
                event_name, event_data = entry[0], entry[1]
                if isinstance(event_data, dict):
                    if event_name == 'execution_start':
                        execution_start_time = event_data.get('timestamp')
                    elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'):
                        # Any terminal event marks the end of execution.
                        execution_end_time = event_data.get('timestamp')
                        if event_name == 'execution_error':
                            execution_error = event_data
                        elif event_name == 'execution_interrupted':
                            was_interrupted = True
    # Map history status_str to the public job status. Interrupted errors
    # report as cancelled; any unrecognized status_str falls back to completed.
    if status_str == 'success':
        status = JobStatus.COMPLETED
    elif status_str == 'error':
        status = JobStatus.CANCELLED if was_interrupted else JobStatus.FAILED
    else:
        status = JobStatus.COMPLETED
    job = prune_dict({
        'id': prompt_id,
        'status': status,
        'priority': priority,
        'create_time': create_time,
        'execution_start_time': execution_start_time,
        'execution_end_time': execution_end_time,
        'execution_error': execution_error,
        'outputs_count': outputs_count,
        'preview_output': preview_output,
        'workflow_id': workflow_id,
    })
    if include_outputs:
        # Full detail view: normalized outputs plus the raw status and workflow.
        job['outputs'] = normalize_outputs(outputs)
        job['execution_status'] = status_info
        job['workflow'] = {
            'prompt': prompt,
            'extra_data': extra_data,
        }
    return job
def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:
    """
    Count outputs and find preview in a single pass.

    Returns (outputs_count, preview_output).

    Preview priority (matching frontend):
    1. type="output" with previewable media
    2. Any previewable media
    """
    count = 0
    preview_output = None
    fallback_preview = None
    for node_id, node_outputs in outputs.items():
        if not isinstance(node_outputs, dict):
            continue
        for media_type, items in node_outputs.items():
            # 'animated' is a boolean flag, not actual output items
            if media_type == 'animated' or not isinstance(items, list):
                continue
            for item in items:
                if not isinstance(item, dict):
                    # Handle text outputs (non-dict items like strings or tuples)
                    normalized = normalize_output_item(item)
                    if normalized is None:
                        # Not a 3D file string — check for text preview
                        if media_type == 'text':
                            count += 1
                            if preview_output is None:
                                # Tuples carry the text as their first element.
                                if isinstance(item, tuple):
                                    text_value = item[0] if item else ''
                                else:
                                    text_value = str(item)
                                text_preview = _create_text_preview(text_value)
                                enriched = {
                                    **text_preview,
                                    'nodeId': node_id,
                                    'mediaType': media_type
                                }
                                # Text only ever serves as a fallback preview.
                                if fallback_preview is None:
                                    fallback_preview = enriched
                        # Non-text, non-3D items are neither counted nor previewed.
                        continue
                    # normalize_output_item returned a dict (e.g. 3D file)
                    item = normalized
                count += 1
                # Once a type="output" preview is locked in, keep counting only.
                if preview_output is not None:
                    continue
                if is_previewable(media_type, item):
                    enriched = {
                        **item,
                        'nodeId': node_id,
                    }
                    # Preserve an item-provided mediaType (e.g. synthesized '3d').
                    if 'mediaType' not in item:
                        enriched['mediaType'] = media_type
                    if item.get('type') == 'output':
                        preview_output = enriched
                    elif fallback_preview is None:
                        fallback_preview = enriched
    return count, preview_output or fallback_preview
def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:
    """Sort jobs list by specified field and order.

    'execution_duration' sorts by (end - start) time; any other sort_by value
    sorts by create_time. 'desc' reverses the order.
    """
    if sort_by == 'execution_duration':
        def sort_key(job):
            # Missing or None timestamps are treated as zero duration.
            begin = job.get('execution_start_time', 0)
            finish = job.get('execution_end_time', 0)
            if begin and finish:
                return finish - begin
            return 0
    else:
        def sort_key(job):
            return job.get('create_time', 0)
    return sorted(jobs, key=sort_key, reverse=sort_order == 'desc')
def get_job(prompt_id: str, running: list, queued: list, history: dict) -> Optional[dict]:
    """
    Get a single job by prompt_id from history or queue.

    Args:
        prompt_id: The prompt ID to look up
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id

    Returns:
        Job dict with full details, or None if not found
    """
    # Finished jobs live in history and carry full output detail.
    if prompt_id in history:
        return normalize_history_item(prompt_id, history[prompt_id], include_outputs=True)
    # Otherwise scan the live queues; index 1 of each queue tuple is the prompt id.
    for status, queue in ((JobStatus.IN_PROGRESS, running), (JobStatus.PENDING, queued)):
        for entry in queue:
            if entry[1] == prompt_id:
                return normalize_queue_item(entry, status)
    return None
def get_all_jobs(
    running: list,
    queued: list,
    history: dict,
    status_filter: Optional[list[str]] = None,
    workflow_id: Optional[str] = None,
    sort_by: str = "created_at",
    sort_order: str = "desc",
    limit: Optional[int] = None,
    offset: int = 0
) -> tuple[list[dict], int]:
    """
    Get all jobs (running, pending, completed) with filtering and sorting.

    Args:
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id
        status_filter: List of statuses to include (from JobStatus.ALL)
        workflow_id: Filter by workflow ID
        sort_by: Field to sort by ('created_at', 'execution_duration')
        sort_order: 'asc' or 'desc'
        limit: Maximum number of items to return
        offset: Number of items to skip

    Returns:
        tuple: (jobs_list, total_count)
    """
    wanted = JobStatus.ALL if status_filter is None else status_filter
    jobs = []
    # Live queue entries first: running, then pending.
    if JobStatus.IN_PROGRESS in wanted:
        jobs.extend(normalize_queue_item(entry, JobStatus.IN_PROGRESS) for entry in running)
    if JobStatus.PENDING in wanted:
        jobs.extend(normalize_queue_item(entry, JobStatus.PENDING) for entry in queued)
    # History holds only terminal states; walk it only when one was requested.
    terminal_wanted = {JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED} & set(wanted)
    if terminal_wanted:
        for pid, record in history.items():
            job = normalize_history_item(pid, record)
            if job.get('status') in terminal_wanted:
                jobs.append(job)
    if workflow_id:
        jobs = [job for job in jobs if job.get('workflow_id') == workflow_id]
    jobs = apply_sorting(jobs, sort_by, sort_order)
    # total_count reflects the filtered set, before pagination is applied.
    total_count = len(jobs)
    if offset > 0:
        jobs = jobs[offset:]
    if limit is not None:
        jobs = jobs[:limit]
    return (jobs, total_count)
"""
Check if an output item is previewable.
Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
Maintains backwards compatibility with existing logic.
Priority:
1. media_type is 'images', 'video', 'audio', or '3d'
2. format field starts with 'video/' or 'audio/'
3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)
""" | Check if an output item is previewable.
Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
Maintains backwards compatibility with existing logic.
Priority:
1. media_type is 'images', 'video', 'audio', or '3d'
2. format field starts with 'video/' or 'audio/'
3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz) | if media_type in PREVIEWABLE_MEDIA_TYPES:
return True
# Check format field (MIME type).
# Maintains backwards compatibility with how custom node outputs are handled in the frontend.
fmt = item.get('format', '')
if fmt and (fmt.startswith('video/') or fmt.startswith('audio/')):
return True
# Check for 3D files by extension
filename = item.get('filename', '').lower()
if any(filename.endswith(ext) for ext in THREE_D_EXTENSIONS):
return True
return False | def is_previewable(media_type: str, item: dict) -> bool:
"""
Check if an output item is previewable.
Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
Maintains backwards compatibility with existing logic.
Priority:
1. media_type is 'images', 'video', 'audio', or '3d'
2. format field starts with 'video/' or 'audio/'
3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)
"""
if media_type in PREVIEWABLE_MEDIA_TYPES:
return True
# Check format field (MIME type).
# Maintains backwards compatibility with how custom node outputs are handled in the frontend.
fmt = item.get('format', '')
if fmt and (fmt.startswith('video/') or fmt.startswith('audio/')):
return True
# Check for 3D files by extension
filename = item.get('filename', '').lower()
if any(filename.endswith(ext) for ext in THREE_D_EXTENSIONS):
return True
return False | [{"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_previewable_media_types", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def 
test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with 
type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 
'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n 
def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 
'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n 
assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have 
execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 
'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = 
normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_previewable_media_types -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_non_previewable_media_types", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be 
previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 
'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, 
f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n 
count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 
'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 
'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n 
),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 
1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 
'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_non_previewable_media_types -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_3d_extensions_previewable", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n 
assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n 
\"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used 
as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and 
non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == 
['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 
1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = 
normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': 
{}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') 
is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_3d_extensions_previewable -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_3d_extensions_case_insensitive", "test_content": 
"\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is 
True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n 
]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be 
previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n 
assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes 
first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 
'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item 
= {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert 
job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n 
assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_3d_extensions_case_insensitive -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_video_format_previewable", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not 
be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be 
skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': 
{\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are 
previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 
'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 
'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 
'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 
'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': 
{\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_video_format_previewable -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_audio_format_previewable", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert 
JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n 
assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = 
get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n 
assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 
'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n 
assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert 
job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file 
output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 
'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_audio_format_previewable -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestIsPreviewable.test_other_format_not_previewable", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n 
normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def 
test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no 
type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 
'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n 
assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = 
(\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', 
{'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': 
[]},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n 
def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = 
normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestIsPreviewable::test_other_format_not_previewable -xvs"}] | {"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": "dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"} | {"body_lines": 12, "file_lines": 390, "has_docstring": true, "num_tests": 7} | {"status": "passed", "tests_run": 7} | repo_patch/0007 | file_overlap |
repo_patch/0004 | Comfy-Org/ComfyUI | middleware/cache_middleware.py | cache_control | cache_control | function | null | """Cache control middleware for ComfyUI server"""
from aiohttp import web
from typing import Callable, Awaitable
# Time in seconds
ONE_HOUR: int = 3600
ONE_DAY: int = 86400
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
@web.middleware
async def cache_control(
request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
) -> web.Response:
"""Cache control middleware that sets appropriate cache headers based on file type and response status"""
# TODO: Implement this function | async def cache_control(
request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
) -> web.Response:
"""Cache control middleware that sets appropriate cache headers based on file type and response status""" | Cache control middleware that sets appropriate cache headers based on file type and response status | response: web.Response = await handler(request)
path_filename = request.path.rsplit("/", 1)[-1]
is_entry_point = path_filename.startswith("index") and path_filename.endswith(
".json"
)
if request.path.endswith(".js") or request.path.endswith(".css") or is_entry_point:
response.headers.setdefault("Cache-Control", "no-cache")
return response
# Early return for non-image files - no cache headers needed
if not request.path.lower().endswith(IMG_EXTENSIONS):
return response
# Handle image files
if response.status == 404:
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
# Success responses and permanent redirects - cache for 1 day
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
elif response.status in (302, 303, 307):
# Temporary redirects - no cache
response.headers.setdefault("Cache-Control", "no-cache")
# Note: 304 Not Modified falls through - no cache headers set
return response | async def cache_control(
request: web.Request, handler: Callable[[web.Request], Awaitable[web.Response]]
) -> web.Response:
"""Cache control middleware that sets appropriate cache headers based on file type and response status"""
response: web.Response = await handler(request)
path_filename = request.path.rsplit("/", 1)[-1]
is_entry_point = path_filename.startswith("index") and path_filename.endswith(
".json"
)
if request.path.endswith(".js") or request.path.endswith(".css") or is_entry_point:
response.headers.setdefault("Cache-Control", "no-cache")
return response
# Early return for non-image files - no cache headers needed
if not request.path.lower().endswith(IMG_EXTENSIONS):
return response
# Handle image files
if response.status == 404:
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_HOUR}")
elif response.status in (200, 201, 202, 203, 204, 205, 206, 301, 308):
# Success responses and permanent redirects - cache for 1 day
response.headers.setdefault("Cache-Control", f"public, max-age={ONE_DAY}")
elif response.status in (302, 303, 307):
# Temporary redirects - no cache
response.headers.setdefault("Cache-Control", "no-cache")
# Note: 304 Not Modified falls through - no cache headers set
return response | [{"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_cache_control_scenarios", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": 
\"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n 
async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] 
== f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve 
existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_cache_control_scenarios -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_all_image_extensions", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, 
ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, 
max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n 
\"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = 
make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should 
preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_all_image_extensions -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_image_status_codes", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": 
\"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n 
{\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" 
in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: 
str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await 
cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_image_status_codes -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_case_insensitive_image_extension", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n 
\"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS 
= [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various 
cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: 
x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request 
= make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_case_insensitive_image_extension -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_edge_cases", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": 
True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": 
\"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n 
@pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] 
== edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest 
tests-unit/server_test/test_cache_control.py::TestCacheControl::test_edge_cases -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_js_preserves_existing_headers", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n 
\"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return 
specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n 
assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await 
cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_js_preserves_existing_headers -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_css_preserves_existing_headers", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import 
make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for 
images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return 
status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test 
different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = 
make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_css_preserves_existing_headers -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_image_preserves_existing_headers", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": 
\"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": \"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n 
{\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, \"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n 
expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert 
response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers 
preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_image_preserves_existing_headers -xvs"}, {"test_file": "tests-unit/server_test/test_cache_control.py", "test_function": "TestCacheControl.test_304_not_modified_inherits_cache", "test_content": "\"\"\"Tests for server cache control middleware\"\"\"\n\nimport pytest\nfrom aiohttp import web\nfrom aiohttp.test_utils import make_mocked_request\nfrom typing import Dict, Any\n\nfrom middleware.cache_middleware import cache_control, ONE_HOUR, ONE_DAY, IMG_EXTENSIONS\n\npytestmark = pytest.mark.asyncio # Apply asyncio mark to all tests\n\n# Test configuration data\nCACHE_SCENARIOS = [\n # Image file scenarios\n {\n \"name\": \"image_200_status\",\n \"path\": \"/test.jpg\",\n \"status\": 200,\n \"expected_cache\": f\"public, max-age={ONE_DAY}\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"image_404_status\",\n \"path\": \"/missing.jpg\",\n \"status\": 404,\n \"expected_cache\": f\"public, max-age={ONE_HOUR}\",\n \"should_have_header\": True,\n },\n # JavaScript/CSS scenarios\n {\n \"name\": 
\"js_no_cache\",\n \"path\": \"/script.js\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"css_no_cache\",\n \"path\": \"/styles.css\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"index_json_no_cache\",\n \"path\": \"/api/index.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n {\n \"name\": \"localized_index_json_no_cache\",\n \"path\": \"/templates/index.zh.json\",\n \"status\": 200,\n \"expected_cache\": \"no-cache\",\n \"should_have_header\": True,\n },\n # Non-matching files\n {\n \"name\": \"html_no_header\",\n \"path\": \"/index.html\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"txt_no_header\",\n \"path\": \"/data.txt\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"api_endpoint_no_header\",\n \"path\": \"/api/endpoint\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n {\n \"name\": \"pdf_no_header\",\n \"path\": \"/file.pdf\",\n \"status\": 200,\n \"expected_cache\": None,\n \"should_have_header\": False,\n },\n]\n\n# Status code scenarios for images\nIMAGE_STATUS_SCENARIOS = [\n # Success statuses get long cache\n {\"status\": 200, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 201, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 202, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 204, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 206, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Permanent redirects get long cache\n {\"status\": 301, \"expected\": f\"public, max-age={ONE_DAY}\"},\n {\"status\": 308, \"expected\": f\"public, max-age={ONE_DAY}\"},\n # Temporary redirects get no cache\n {\"status\": 302, \"expected\": \"no-cache\"},\n {\"status\": 303, 
\"expected\": \"no-cache\"},\n {\"status\": 307, \"expected\": \"no-cache\"},\n # 404 gets short cache\n {\"status\": 404, \"expected\": f\"public, max-age={ONE_HOUR}\"},\n]\n\n# Case sensitivity test paths\nCASE_SENSITIVITY_PATHS = [\"/image.JPG\", \"/photo.PNG\", \"/pic.JpEg\"]\n\n# Edge case test paths\nEDGE_CASE_PATHS = [\n {\n \"name\": \"query_strings_ignored\",\n \"path\": \"/image.jpg?v=123&size=large\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"multiple_dots_in_path\",\n \"path\": \"/image.min.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n {\n \"name\": \"nested_paths_with_images\",\n \"path\": \"/static/images/photo.jpg\",\n \"expected\": f\"public, max-age={ONE_DAY}\",\n },\n]\n\n\nclass TestCacheControl:\n \"\"\"Test cache control middleware functionality\"\"\"\n\n @pytest.fixture\n def status_handler_factory(self):\n \"\"\"Create a factory for handlers that return specific status codes\"\"\"\n\n def factory(status: int, headers: Dict[str, str] = None):\n async def handler(request):\n return web.Response(status=status, headers=headers or {})\n\n return handler\n\n return factory\n\n @pytest.fixture\n def mock_handler(self, status_handler_factory):\n \"\"\"Create a mock handler that returns a response with 200 status\"\"\"\n return status_handler_factory(200)\n\n @pytest.fixture\n def handler_with_existing_cache(self, status_handler_factory):\n \"\"\"Create a handler that returns response with existing Cache-Control header\"\"\"\n return status_handler_factory(200, {\"Cache-Control\": \"max-age=3600\"})\n\n async def assert_cache_header(\n self,\n response: web.Response,\n expected_cache: str = None,\n should_have_header: bool = True,\n ):\n \"\"\"Helper to assert cache control headers\"\"\"\n if should_have_header:\n assert \"Cache-Control\" in response.headers\n if expected_cache:\n assert response.headers[\"Cache-Control\"] == expected_cache\n else:\n assert \"Cache-Control\" not in response.headers\n\n # 
Parameterized tests\n @pytest.mark.parametrize(\"scenario\", CACHE_SCENARIOS, ids=lambda x: x[\"name\"])\n async def test_cache_control_scenarios(\n self, scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test various cache control scenarios\"\"\"\n handler = status_handler_factory(scenario[\"status\"])\n request = make_mocked_request(\"GET\", scenario[\"path\"])\n response = await cache_control(request, handler)\n\n assert response.status == scenario[\"status\"]\n await self.assert_cache_header(\n response, scenario[\"expected_cache\"], scenario[\"should_have_header\"]\n )\n\n @pytest.mark.parametrize(\"ext\", IMG_EXTENSIONS)\n async def test_all_image_extensions(self, ext: str, mock_handler):\n \"\"\"Test all defined image extensions are handled correctly\"\"\"\n request = make_mocked_request(\"GET\", f\"/image{ext}\")\n response = await cache_control(request, mock_handler)\n\n assert response.status == 200\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\n \"status_scenario\", IMAGE_STATUS_SCENARIOS, ids=lambda x: f\"status_{x['status']}\"\n )\n async def test_image_status_codes(\n self, status_scenario: Dict[str, Any], status_handler_factory\n ):\n \"\"\"Test different status codes for image requests\"\"\"\n handler = status_handler_factory(status_scenario[\"status\"])\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == status_scenario[\"status\"]\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == status_scenario[\"expected\"]\n\n @pytest.mark.parametrize(\"path\", CASE_SENSITIVITY_PATHS)\n async def test_case_insensitive_image_extension(self, path: str, mock_handler):\n \"\"\"Test that image extensions are matched case-insensitively\"\"\"\n request = make_mocked_request(\"GET\", path)\n response = await 
cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == f\"public, max-age={ONE_DAY}\"\n\n @pytest.mark.parametrize(\"edge_case\", EDGE_CASE_PATHS, ids=lambda x: x[\"name\"])\n async def test_edge_cases(self, edge_case: Dict[str, str], mock_handler):\n \"\"\"Test edge cases like query strings, nested paths, etc.\"\"\"\n request = make_mocked_request(\"GET\", edge_case[\"path\"])\n response = await cache_control(request, mock_handler)\n\n assert \"Cache-Control\" in response.headers\n assert response.headers[\"Cache-Control\"] == edge_case[\"expected\"]\n\n # Header preservation tests (special cases not covered by parameterization)\n async def test_js_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .js files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/script.js\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_css_preserves_existing_headers(self, handler_with_existing_cache):\n \"\"\"Test that .css files preserve existing Cache-Control headers\"\"\"\n request = make_mocked_request(\"GET\", \"/styles.css\")\n response = await cache_control(request, handler_with_existing_cache)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"max-age=3600\"\n\n async def test_image_preserves_existing_headers(self, status_handler_factory):\n \"\"\"Test that image cache headers preserve existing Cache-Control\"\"\"\n handler = status_handler_factory(200, {\"Cache-Control\": \"private, no-cache\"})\n request = make_mocked_request(\"GET\", \"/image.jpg\")\n response = await cache_control(request, handler)\n\n # setdefault should preserve existing header\n assert response.headers[\"Cache-Control\"] == \"private, no-cache\"\n\n async 
def test_304_not_modified_inherits_cache(self, status_handler_factory):\n \"\"\"Test that 304 Not Modified doesn't set cache headers for images\"\"\"\n handler = status_handler_factory(304, {\"Cache-Control\": \"max-age=7200\"})\n request = make_mocked_request(\"GET\", \"/not-modified.jpg\")\n response = await cache_control(request, handler)\n\n assert response.status == 304\n # Should preserve existing cache header, not override\n assert response.headers[\"Cache-Control\"] == \"max-age=7200\"\n", "framework": "pytest", "test_command": "pytest tests-unit/server_test/test_cache_control.py::TestCacheControl::test_304_not_modified_inherits_cache -xvs"}] | {"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": "dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"} | {"body_lines": 22, "file_lines": 54, "has_docstring": true, "num_tests": 9} | {"status": "passed", "tests_run": 9} | repo_patch/0008 | clean |
repo_patch/0005 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_base_model | _get_whisper_base_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
# TODO: Implement this function
# Create the model instance
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}, {"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_selector_import_errors_force_native", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n 
language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n 
m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == 
InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = 
AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert 
result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_selector_import_errors_force_native -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 2} | {"status": "passed", "tests_run": 
2} | repo_patch/0009 | clean |
repo_patch/0006 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_tiny_model | _get_whisper_tiny_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
# TODO: Implement this function
# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0010 | clean |
repo_patch/0007 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_medium_model | _get_whisper_medium_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
# TODO: Implement this function
# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0011 | clean |
repo_patch/0008 | docling-project/docling | docling/backend/mets_gbs_backend.py | unload | MetsGbsPageBackend.unload | method | MetsGbsPageBackend | """Backend for GBS Google Books schema."""
import logging
import tarfile
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from lxml import etree
from PIL import Image
from PIL.Image import Image as PILImage
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.base_models import InputFormat
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
def _get_pdf_page_geometry(
    size: Size,
) -> PdfPageGeometry:
    """Build a full-page geometry (CROP_BOX boundary) for a page of *size*.

    All boundary boxes (art/bleed/crop/media/trim) are set to the same
    rectangle covering the whole page, with a top-left coordinate origin.
    """
    full_page = BoundingBox.from_tuple(
        (0, 0, size.width, size.height), CoordOrigin.TOPLEFT
    )
    return PdfPageGeometry(
        angle=0.0,
        rect=BoundingRectangle.from_bounding_box(full_page),
        boundary_type=PdfPageBoundaryType.CROP_BOX,
        art_bbox=full_page,
        bleed_bbox=full_page,
        crop_bbox=full_page,
        media_bbox=full_page,
        trim_bbox=full_page,
    )
class MetsGbsPageBackend(PdfPageBackend):
    """Page backend for GBS METS volumes.

    Wraps a pre-parsed ``SegmentedPdfPage`` (OCR-derived text cells) together
    with the page's scan image, and serves both through the standard
    ``PdfPageBackend`` interface.
    """

    def __init__(self, parsed_page: SegmentedPdfPage, page_im: PILImage):
        self._im = page_im
        self._dpage = parsed_page
        # The page is usable only if parsing produced a segmented page.
        self.valid = parsed_page is not None

    def is_valid(self) -> bool:
        return self.valid

    def get_text_in_rect(self, bbox: BoundingBox) -> str:
        """Concatenate (space-joined) the text of line cells mostly inside *bbox*."""
        # Find intersecting cells on the page
        text_piece = ""
        page_size = self.get_size()
        scale = (
            1  # FIX - Replace with param in get_text_in_rect across backends (optional)
        )
        for i, cell in enumerate(self._dpage.textline_cells):
            cell_bbox = (
                cell.rect.to_bounding_box()
                .to_top_left_origin(page_height=page_size.height)
                .scaled(scale)
            )
            # A cell counts as "inside" if more than half of it overlaps bbox.
            overlap_frac = cell_bbox.intersection_over_self(bbox)
            if overlap_frac > 0.5:
                if len(text_piece) > 0:
                    text_piece += " "
                text_piece += cell.text
        return text_piece

    def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
        return self._dpage

    def get_text_cells(self) -> Iterable[TextCell]:
        return self._dpage.textline_cells

    def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
        """Yield scaled top-left-origin boxes of bitmap resources above the threshold."""
        AREA_THRESHOLD = 0  # 32 * 32
        images = self._dpage.bitmap_resources
        for img in images:
            cropbox = img.rect.to_bounding_box().to_top_left_origin(
                self.get_size().height
            )
            if cropbox.area() > AREA_THRESHOLD:
                cropbox = cropbox.scaled(scale=scale)
                yield cropbox

    def get_page_image(
        self, scale: float = 1, cropbox: Optional[BoundingBox] = None
    ) -> Image.Image:
        """Return the page image resized by *scale*, optionally cropped to *cropbox*."""
        page_size = self.get_size()
        # The stored image is expected to match the parsed page dimensions 1:1.
        assert (
            page_size.width == self._im.size[0] and page_size.height == self._im.size[1]
        )
        if not cropbox:
            # Default: crop box covering the entire page.
            cropbox = BoundingBox(
                l=0,
                r=page_size.width,
                t=0,
                b=page_size.height,
                coord_origin=CoordOrigin.TOPLEFT,
            )
        image = self._im.resize(
            size=(round(page_size.width * scale), round(page_size.height * scale))
        ).crop(cropbox.scaled(scale=scale).as_tuple())
        return image

    def get_size(self) -> Size:
        return Size(
            width=self._dpage.dimension.width, height=self._dpage.dimension.height
        )

    def unload(self) -> None:
        """Release the page image and parsed page so their memory can be reclaimed."""
        # Previously an unimplemented TODO stub (a `def` without a body is a
        # syntax error). The hasattr guards make unload() idempotent, so
        # calling it repeatedly is harmless.
        if hasattr(self, "_im"):
            delattr(self, "_im")
        if hasattr(self, "_dpage"):
            delattr(self, "_dpage")
class _UseType(str, Enum):
    # Values of the METS <fileGrp USE="..."> attribute that this backend
    # recognizes; any other USE value is ignored during indexing.
    IMAGE = "image"  # page scan image
    OCR = "OCR"  # plain OCR output (no coordinates)
    COORD_OCR = "coordOCR"  # OCR output with word/line coordinates
@dataclass
class _FileInfo:
    # One <mets:file> entry: its ID, declared MIME type, the path inside the
    # tar archive (taken from xlink:href), and the USE category of its fileGrp.
    file_id: str
    mimetype: str
    path: str
    use: _UseType
@dataclass
class _PageFiles:
    # The files belonging to a single page, one slot per USE category.
    # Any slot may stay None if the archive lacks that representation.
    image: Optional[_FileInfo] = None
    ocr: Optional[_FileInfo] = None
    coordOCR: Optional[_FileInfo] = None
def _extract_rect(title_str: str) -> Optional[BoundingRectangle]:
    """Parse the 'bbox L T R B' fragment of an hOCR-style title attribute.

    Example input: 'bbox 279 177 306 214;x_wconf 97'. Returns None when no
    bbox fragment is present or when it cannot be parsed.
    """
    for fragment in title_str.split(";"):
        fragment = fragment.strip()
        if not fragment.startswith("bbox "):
            continue
        try:
            coords = tuple(int(v) for v in fragment.split()[1:])
            return BoundingRectangle.from_bounding_box(
                bbox=BoundingBox.from_tuple(coords, origin=CoordOrigin.TOPLEFT)
            )
        except Exception:
            return None
    return None
def _extract_confidence(title_str) -> float:
"""Extracts x_wconf (OCR confidence) value from title string."""
for part in title_str.split(";"):
part = part.strip()
if part.startswith("x_wconf"):
try:
return float(part.split()[1]) / 100.0
except Exception:
return 1
return 1
class MetsGbsDocumentBackend(PdfDocumentBackend):
    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        """Open the tar.gz volume, locate the GBS METS XML, and index page files.

        Raises:
            RuntimeError: if no XML member in the archive validates as a
                GBS-profile METS document.
        """
        super().__init__(in_doc, path_or_stream)
        # Accept either a filesystem path or an in-memory gzip'ed tar stream.
        self._tar: tarfile.TarFile = (
            tarfile.open(name=self.path_or_stream, mode="r:gz")
            if isinstance(self.path_or_stream, Path)
            else tarfile.open(fileobj=self.path_or_stream, mode="r:gz")
        )
        self.root_mets: Optional[etree._Element] = None
        # Maps 0-based page number -> the page's image/OCR/coordOCR files.
        self.page_map: Dict[int, _PageFiles] = {}
        # Scan archive members for the first .xml file that validates as GBS METS.
        for member in self._tar.getmembers():
            if member.name.endswith(".xml"):
                file = self._tar.extractfile(member)
                if file is not None:
                    content = file.read()
                    self.root_mets = self._validate_mets_xml(content)
                    if self.root_mets is not None:
                        break
        if self.root_mets is None:
            raise RuntimeError(
                f"METS GBS backend could not load document {self.document_hash}."
            )
        ns = {
            "mets": "http://www.loc.gov/METS/",
            "xlink": "http://www.w3.org/1999/xlink",
            "xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "gbs": "http://books.google.com/gbs",
            "premis": "info:lc/xmlns/premis-v2",
            "marc": "http://www.loc.gov/MARC21/slim",
        }
        # First pass: collect every <mets:file> (by ID) from fileGrps whose
        # USE attribute is one we understand.
        file_info_by_id: Dict[str, _FileInfo] = {}
        for filegrp in self.root_mets.xpath(".//mets:fileGrp", namespaces=ns):
            use_raw = filegrp.get("USE")
            try:
                use = _UseType(use_raw)
            except ValueError:
                continue  # Ignore unknown USE types
            for file_elem in filegrp.xpath("./mets:file", namespaces=ns):
                file_id = file_elem.get("ID")
                mimetype = file_elem.get("MIMETYPE")
                flocat_elem = file_elem.find("mets:FLocat", namespaces=ns)
                # The archive-internal path lives in the xlink:href attribute.
                href = (
                    flocat_elem.get("{http://www.w3.org/1999/xlink}href")
                    if flocat_elem is not None
                    else None
                )
                if href is None:
                    continue
                file_info_by_id[file_id] = _FileInfo(
                    file_id=file_id, mimetype=mimetype, path=href, use=use
                )
        USE_TO_ATTR = {
            _UseType.IMAGE: "image",
            _UseType.OCR: "ocr",
            _UseType.COORD_OCR: "coordOCR",
        }
        # Second pass: walk the structural map's page divs and attach each
        # referenced file to the right _PageFiles slot via its USE type.
        for div in self.root_mets.xpath('.//mets:div[@TYPE="page"]', namespaces=ns):
            order_str = div.get("ORDER")
            if not order_str:
                continue
            try:
                page_no = int(order_str) - 1  # make 0-index pages
            except ValueError:
                continue
            page_files = _PageFiles()
            for fptr in div.xpath("./mets:fptr", namespaces=ns):
                file_id = fptr.get("FILEID")
                file_info = file_info_by_id.get(file_id)
                if file_info:
                    attr = USE_TO_ATTR.get(file_info.use)
                    if attr:
                        setattr(page_files, attr, file_info)
            self.page_map[page_no] = page_files
def _validate_mets_xml(self, xml_string) -> Optional[etree._Element]:
root: etree._Element = etree.fromstring(xml_string)
if (
root.tag == "{http://www.loc.gov/METS/}mets"
and root.get("PROFILE") == "gbs"
):
return root
_log.warning(f"The root element is not <mets:mets> with PROFILE='gbs': {root}")
return None
    def _parse_page(self, page_no: int) -> Tuple[SegmentedPdfPage, PILImage]:
        """Extract page *page_no*'s image and coordOCR file from the archive and
        build a SegmentedPdfPage (word + line cells) plus the RGB page image.
        """
        # TODO: use better fallbacks...
        image_info = self.page_map[page_no].image
        assert image_info is not None
        ocr_info = self.page_map[page_no].coordOCR
        assert ocr_info is not None
        image_file = self._tar.extractfile(image_info.path)
        assert image_file is not None
        buf = BytesIO(image_file.read())
        im: PILImage = Image.open(buf)
        ocr_file = self._tar.extractfile(ocr_info.path)
        assert ocr_file is not None
        ocr_content = ocr_file.read()
        # The coordOCR payload is hOCR-like HTML; parse leniently.
        parser = etree.HTMLParser()
        ocr_root: etree._Element = etree.fromstring(ocr_content, parser=parser)
        line_cells: List[TextCell] = []
        word_cells: List[TextCell] = []
        page_div = ocr_root.xpath("//div[@class='ocr_page']")
        # Default page size from the image; prefer the OCR page bbox when present
        # so cell coordinates and page dimensions agree.
        size = Size(width=im.size[0], height=im.size[1])
        if page_div:
            title = page_div[0].attrib.get("title", "")
            rect = _extract_rect(title)
            if rect:
                size = Size(width=rect.width, height=rect.height)
        else:
            _log.error(f"Could not find ocr_page for page {page_no}")
        # Resize the scan to the OCR coordinate space and normalize to RGB.
        im = im.resize(size=(round(size.width), round(size.height)))
        im = im.convert("RGB")
        # Extract all ocrx_word spans
        for ix, word in enumerate(ocr_root.xpath("//span[@class='ocrx_word']")):
            text = "".join(word.itertext()).strip()
            title = word.attrib.get("title", "")
            rect = _extract_rect(title)
            conf = _extract_confidence(title)
            if rect:
                word_cells.append(
                    TextCell(
                        index=ix,
                        text=text,
                        orig=text,
                        rect=rect,
                        from_ocr=True,
                        confidence=conf,
                    )
                )
        # Extract all ocr_line spans
        # line: etree._Element
        for ix, line in enumerate(ocr_root.xpath("//span[@class='ocr_line']")):
            text = "".join(line.itertext()).strip()
            title = line.attrib.get("title", "")
            rect = _extract_rect(title)
            conf = _extract_confidence(title)
            if rect:
                line_cells.append(
                    TextCell(
                        index=ix,
                        text=text,
                        orig=text,
                        rect=rect,
                        from_ocr=True,
                        confidence=conf,
                    )
                )
        page = SegmentedPdfPage(
            dimension=_get_pdf_page_geometry(size),
            textline_cells=line_cells,
            char_cells=[],
            word_cells=word_cells,
            has_textlines=True,
            has_words=True,
            has_chars=False,
        )
        return page, im
def page_count(self) -> int:
return len(self.page_map)
def load_page(self, page_no: int) -> MetsGbsPageBackend:
# TODO: is this thread-safe?
page, im = self._parse_page(page_no)
return MetsGbsPageBackend(parsed_page=page, page_im=im)
def is_valid(self) -> bool:
return self.root_mets is not None and self.page_count() > 0
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.METS_GBS}
@classmethod
def supports_pagination(cls) -> bool:
return True
def unload(self) -> None:
super().unload()
self._tar.close() | def unload(self) -> None: | if hasattr(self, "_im"):
delattr(self, "_im")
if hasattr(self, "_dpage"):
delattr(self, "_dpage") | def unload(self) -> None:
if hasattr(self, "_im"):
delattr(self, "_im")
if hasattr(self, "_dpage"):
delattr(self, "_dpage") | [{"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_process_pages", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n 
page_backend.unload()\n doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_process_pages -xvs"}, {"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_get_text_from_rect", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up 
resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_get_text_from_rect -xvs"}, {"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_crop_page_image", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n 
doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_crop_page_image -xvs"}, {"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_num_pages", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for 
page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_num_pages -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 4, "file_lines": 400, "has_docstring": false, "num_tests": 4} | {"status": "passed", "tests_run": 4} | repo_patch/0012 | file_overlap | |
repo_patch/0009 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_small_model | _get_whisper_small_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
    """
    Pick the most suitable Whisper Tiny configuration for this machine.

    Prefers the MLX build when both torch's MPS backend (Apple Silicon) and the
    mlx-whisper package are available; otherwise returns native Whisper Tiny.
    """
    # Probe Apple-Silicon GPU support through torch's MPS backend.
    try:
        import torch

        mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        mps_ok = False

    # Probe whether the optional mlx-whisper package can be imported.
    try:
        import mlx_whisper  # type: ignore

        mlx_ok = True
    except ImportError:
        mlx_ok = False

    if not (mps_ok and mlx_ok):
        # Fallback path: native Whisper works on any hardware.
        return InlineAsrNativeWhisperOptions(
            repo_id="tiny",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )
    return InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-tiny-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


# Instantiate the auto-selecting Tiny options once at import time.
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
    """
    Get the best Whisper Small model for the current hardware.

    Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
    otherwise falls back to native Whisper Small.
    """
    # Previously an unimplemented TODO stub; implemented to mirror the sibling
    # _get_whisper_*_model() selectors (tiny/medium/base/large/turbo).
    # Check if MPS is available (Apple Silicon)
    try:
        import torch

        has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        has_mps = False

    # Check if mlx-whisper is available
    try:
        import mlx_whisper  # type: ignore

        has_mlx_whisper = True
    except ImportError:
        has_mlx_whisper = False

    # Use MLX Whisper if both MPS and mlx-whisper are available
    if has_mps and has_mlx_whisper:
        # Same repo as the explicit WHISPER_SMALL_MLX option below.
        return InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-small-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
            task="transcribe",
            word_timestamps=True,
            no_speech_threshold=0.6,
            logprob_threshold=-1.0,
            compression_ratio_threshold=2.4,
        )
    else:
        # Same repo as the explicit WHISPER_SMALL_NATIVE option below.
        return InlineAsrNativeWhisperOptions(
            repo_id="small",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )


# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
    """
    Pick the most suitable Whisper Medium configuration for this machine.

    Prefers the MLX build when both torch's MPS backend (Apple Silicon) and the
    mlx-whisper package are available; otherwise returns native Whisper Medium.
    """
    # Probe Apple-Silicon GPU support through torch's MPS backend.
    try:
        import torch

        mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        mps_ok = False

    # Probe whether the optional mlx-whisper package can be imported.
    try:
        import mlx_whisper  # type: ignore

        mlx_ok = True
    except ImportError:
        mlx_ok = False

    if not (mps_ok and mlx_ok):
        # Fallback path: native Whisper works on any hardware.
        return InlineAsrNativeWhisperOptions(
            repo_id="medium",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )
    return InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-medium-mlx-8bit",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


# Instantiate the auto-selecting Medium options once at import time.
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
    """
    Pick the most suitable Whisper Base configuration for this machine.

    Prefers the MLX build when both torch's MPS backend (Apple Silicon) and the
    mlx-whisper package are available; otherwise returns native Whisper Base.
    """
    # Probe Apple-Silicon GPU support through torch's MPS backend.
    try:
        import torch

        mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        mps_ok = False

    # Probe whether the optional mlx-whisper package can be imported.
    try:
        import mlx_whisper  # type: ignore

        mlx_ok = True
    except ImportError:
        mlx_ok = False

    if not (mps_ok and mlx_ok):
        # Fallback path: native Whisper works on any hardware.
        return InlineAsrNativeWhisperOptions(
            repo_id="base",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )
    return InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-base-mlx",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


# Instantiate the auto-selecting Base options once at import time.
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
    """
    Pick the most suitable Whisper Large configuration for this machine.

    Prefers the MLX build when both torch's MPS backend (Apple Silicon) and the
    mlx-whisper package are available; otherwise returns native Whisper Large.
    """
    # Probe Apple-Silicon GPU support through torch's MPS backend.
    try:
        import torch

        mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        mps_ok = False

    # Probe whether the optional mlx-whisper package can be imported.
    try:
        import mlx_whisper  # type: ignore

        mlx_ok = True
    except ImportError:
        mlx_ok = False

    if not (mps_ok and mlx_ok):
        # Fallback path: native Whisper works on any hardware.
        return InlineAsrNativeWhisperOptions(
            repo_id="large",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )
    return InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-large-mlx-8bit",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


# Instantiate the auto-selecting Large options once at import time.
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
    """
    Pick the most suitable Whisper Turbo configuration for this machine.

    Prefers the MLX build when both torch's MPS backend (Apple Silicon) and the
    mlx-whisper package are available; otherwise returns native Whisper Turbo.
    """
    # Probe Apple-Silicon GPU support through torch's MPS backend.
    try:
        import torch

        mps_ok = torch.backends.mps.is_built() and torch.backends.mps.is_available()
    except ImportError:
        mps_ok = False

    # Probe whether the optional mlx-whisper package can be imported.
    try:
        import mlx_whisper  # type: ignore

        mlx_ok = True
    except ImportError:
        mlx_ok = False

    if not (mps_ok and mlx_ok):
        # Fallback path: native Whisper works on any hardware.
        return InlineAsrNativeWhisperOptions(
            repo_id="turbo",
            inference_framework=InferenceAsrFramework.WHISPER,
            verbose=True,
            timestamps=True,
            word_timestamps=True,
            temperature=0.0,
            max_new_tokens=256,
            max_time_chunk=30.0,
        )
    return InlineAsrMlxWhisperOptions(
        repo_id="mlx-community/whisper-turbo",
        inference_framework=InferenceAsrFramework.MLX,
        language="en",
        task="transcribe",
        word_timestamps=True,
        no_speech_threshold=0.6,
        logprob_threshold=-1.0,
        compression_ratio_threshold=2.4,
    )


# Instantiate the auto-selecting Turbo options once at import time.
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
# (bypassing the hardware auto-detection performed by _get_whisper_*_model()).
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-tiny-mlx",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-small-mlx",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-medium-mlx-8bit",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-base-mlx",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-large-mlx-8bit",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
    repo_id="mlx-community/whisper-turbo",
    inference_framework=InferenceAsrFramework.MLX,
    language="en",
    task="transcribe",
    word_timestamps=True,
    no_speech_threshold=0.6,
    logprob_threshold=-1.0,
    compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="tiny",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="small",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="medium",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="base",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="large",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
    repo_id="turbo",
    inference_framework=InferenceAsrFramework.WHISPER,
    verbose=True,
    timestamps=True,
    word_timestamps=True,
    temperature=0.0,
    max_new_tokens=256,
    max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0013 | clean |
repo_patch/0010 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_large_model | _get_whisper_large_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
# TODO: Implement this function
# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0014 | clean |
repo_patch/0011 | docling-project/docling | docling/backend/mets_gbs_backend.py | get_text_in_rect | MetsGbsPageBackend.get_text_in_rect | method | MetsGbsPageBackend | """Backend for GBS Google Books schema."""
import logging
import tarfile
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from lxml import etree
from PIL import Image
from PIL.Image import Image as PILImage
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.base_models import InputFormat
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
def _get_pdf_page_geometry(
size: Size,
) -> PdfPageGeometry:
boundary_type: PdfPageBoundaryType = PdfPageBoundaryType.CROP_BOX
bbox_tuple = (0, 0, size.width, size.height)
bbox = BoundingBox.from_tuple(bbox_tuple, CoordOrigin.TOPLEFT)
return PdfPageGeometry(
angle=0.0,
rect=BoundingRectangle.from_bounding_box(bbox),
boundary_type=boundary_type,
art_bbox=bbox,
bleed_bbox=bbox,
crop_bbox=bbox,
media_bbox=bbox,
trim_bbox=bbox,
)
class MetsGbsPageBackend(PdfPageBackend):
def __init__(self, parsed_page: SegmentedPdfPage, page_im: PILImage):
self._im = page_im
self._dpage = parsed_page
self.valid = parsed_page is not None
def is_valid(self) -> bool:
return self.valid
def get_text_in_rect(self, bbox: BoundingBox) -> str:
# Find intersecting cells on the page
# TODO: Implement this function
def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
return self._dpage
def get_text_cells(self) -> Iterable[TextCell]:
return self._dpage.textline_cells
def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
AREA_THRESHOLD = 0 # 32 * 32
images = self._dpage.bitmap_resources
for img in images:
cropbox = img.rect.to_bounding_box().to_top_left_origin(
self.get_size().height
)
if cropbox.area() > AREA_THRESHOLD:
cropbox = cropbox.scaled(scale=scale)
yield cropbox
def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
page_size = self.get_size()
assert (
page_size.width == self._im.size[0] and page_size.height == self._im.size[1]
)
if not cropbox:
cropbox = BoundingBox(
l=0,
r=page_size.width,
t=0,
b=page_size.height,
coord_origin=CoordOrigin.TOPLEFT,
)
image = self._im.resize(
size=(round(page_size.width * scale), round(page_size.height * scale))
).crop(cropbox.scaled(scale=scale).as_tuple())
return image
def get_size(self) -> Size:
return Size(
width=self._dpage.dimension.width, height=self._dpage.dimension.height
)
def unload(self) -> None:
if hasattr(self, "_im"):
delattr(self, "_im")
if hasattr(self, "_dpage"):
delattr(self, "_dpage")
class _UseType(str, Enum):
IMAGE = "image"
OCR = "OCR"
COORD_OCR = "coordOCR"
@dataclass
class _FileInfo:
file_id: str
mimetype: str
path: str
use: _UseType
@dataclass
class _PageFiles:
image: Optional[_FileInfo] = None
ocr: Optional[_FileInfo] = None
coordOCR: Optional[_FileInfo] = None
def _extract_rect(title_str: str) -> Optional[BoundingRectangle]:
"""
Extracts bbox from title string like 'bbox 279 177 306 214;x_wconf 97'
"""
parts = title_str.split(";")
for part in parts:
part = part.strip()
if part.startswith("bbox "):
try:
coords = part.split()[1:]
rect = BoundingRectangle.from_bounding_box(
bbox=BoundingBox.from_tuple(
tuple(map(int, coords)), origin=CoordOrigin.TOPLEFT
)
)
return rect
except Exception:
return None
return None
def _extract_confidence(title_str) -> float:
"""Extracts x_wconf (OCR confidence) value from title string."""
for part in title_str.split(";"):
part = part.strip()
if part.startswith("x_wconf"):
try:
return float(part.split()[1]) / 100.0
except Exception:
return 1
return 1
class MetsGbsDocumentBackend(PdfDocumentBackend):
def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
super().__init__(in_doc, path_or_stream)
self._tar: tarfile.TarFile = (
tarfile.open(name=self.path_or_stream, mode="r:gz")
if isinstance(self.path_or_stream, Path)
else tarfile.open(fileobj=self.path_or_stream, mode="r:gz")
)
self.root_mets: Optional[etree._Element] = None
self.page_map: Dict[int, _PageFiles] = {}
for member in self._tar.getmembers():
if member.name.endswith(".xml"):
file = self._tar.extractfile(member)
if file is not None:
content = file.read()
self.root_mets = self._validate_mets_xml(content)
if self.root_mets is not None:
break
if self.root_mets is None:
raise RuntimeError(
f"METS GBS backend could not load document {self.document_hash}."
)
ns = {
"mets": "http://www.loc.gov/METS/",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"gbs": "http://books.google.com/gbs",
"premis": "info:lc/xmlns/premis-v2",
"marc": "http://www.loc.gov/MARC21/slim",
}
file_info_by_id: Dict[str, _FileInfo] = {}
for filegrp in self.root_mets.xpath(".//mets:fileGrp", namespaces=ns):
use_raw = filegrp.get("USE")
try:
use = _UseType(use_raw)
except ValueError:
continue # Ignore unknown USE types
for file_elem in filegrp.xpath("./mets:file", namespaces=ns):
file_id = file_elem.get("ID")
mimetype = file_elem.get("MIMETYPE")
flocat_elem = file_elem.find("mets:FLocat", namespaces=ns)
href = (
flocat_elem.get("{http://www.w3.org/1999/xlink}href")
if flocat_elem is not None
else None
)
if href is None:
continue
file_info_by_id[file_id] = _FileInfo(
file_id=file_id, mimetype=mimetype, path=href, use=use
)
USE_TO_ATTR = {
_UseType.IMAGE: "image",
_UseType.OCR: "ocr",
_UseType.COORD_OCR: "coordOCR",
}
for div in self.root_mets.xpath('.//mets:div[@TYPE="page"]', namespaces=ns):
order_str = div.get("ORDER")
if not order_str:
continue
try:
page_no = int(order_str) - 1 # make 0-index pages
except ValueError:
continue
page_files = _PageFiles()
for fptr in div.xpath("./mets:fptr", namespaces=ns):
file_id = fptr.get("FILEID")
file_info = file_info_by_id.get(file_id)
if file_info:
attr = USE_TO_ATTR.get(file_info.use)
if attr:
setattr(page_files, attr, file_info)
self.page_map[page_no] = page_files
def _validate_mets_xml(self, xml_string) -> Optional[etree._Element]:
root: etree._Element = etree.fromstring(xml_string)
if (
root.tag == "{http://www.loc.gov/METS/}mets"
and root.get("PROFILE") == "gbs"
):
return root
_log.warning(f"The root element is not <mets:mets> with PROFILE='gbs': {root}")
return None
def _parse_page(self, page_no: int) -> Tuple[SegmentedPdfPage, PILImage]:
# TODO: use better fallbacks...
image_info = self.page_map[page_no].image
assert image_info is not None
ocr_info = self.page_map[page_no].coordOCR
assert ocr_info is not None
image_file = self._tar.extractfile(image_info.path)
assert image_file is not None
buf = BytesIO(image_file.read())
im: PILImage = Image.open(buf)
ocr_file = self._tar.extractfile(ocr_info.path)
assert ocr_file is not None
ocr_content = ocr_file.read()
parser = etree.HTMLParser()
ocr_root: etree._Element = etree.fromstring(ocr_content, parser=parser)
line_cells: List[TextCell] = []
word_cells: List[TextCell] = []
page_div = ocr_root.xpath("//div[@class='ocr_page']")
size = Size(width=im.size[0], height=im.size[1])
if page_div:
title = page_div[0].attrib.get("title", "")
rect = _extract_rect(title)
if rect:
size = Size(width=rect.width, height=rect.height)
else:
_log.error(f"Could not find ocr_page for page {page_no}")
im = im.resize(size=(round(size.width), round(size.height)))
im = im.convert("RGB")
# Extract all ocrx_word spans
for ix, word in enumerate(ocr_root.xpath("//span[@class='ocrx_word']")):
text = "".join(word.itertext()).strip()
title = word.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
word_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
# Extract all ocr_line spans
# line: etree._Element
for ix, line in enumerate(ocr_root.xpath("//span[@class='ocr_line']")):
text = "".join(line.itertext()).strip()
title = line.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
line_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
page = SegmentedPdfPage(
dimension=_get_pdf_page_geometry(size),
textline_cells=line_cells,
char_cells=[],
word_cells=word_cells,
has_textlines=True,
has_words=True,
has_chars=False,
)
return page, im
def page_count(self) -> int:
return len(self.page_map)
def load_page(self, page_no: int) -> MetsGbsPageBackend:
# TODO: is this thread-safe?
page, im = self._parse_page(page_no)
return MetsGbsPageBackend(parsed_page=page, page_im=im)
def is_valid(self) -> bool:
return self.root_mets is not None and self.page_count() > 0
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.METS_GBS}
@classmethod
def supports_pagination(cls) -> bool:
return True
def unload(self) -> None:
super().unload()
self._tar.close() | def get_text_in_rect(self, bbox: BoundingBox) -> str:
# Find intersecting cells on the page | text_piece = ""
page_size = self.get_size()
scale = (
1 # FIX - Replace with param in get_text_in_rect across backends (optional)
)
for i, cell in enumerate(self._dpage.textline_cells):
cell_bbox = (
cell.rect.to_bounding_box()
.to_top_left_origin(page_height=page_size.height)
.scaled(scale)
)
overlap_frac = cell_bbox.intersection_over_self(bbox)
if overlap_frac > 0.5:
if len(text_piece) > 0:
text_piece += " "
text_piece += cell.text
return text_piece | def get_text_in_rect(self, bbox: BoundingBox) -> str:
# Find intersecting cells on the page
text_piece = ""
page_size = self.get_size()
scale = (
1 # FIX - Replace with param in get_text_in_rect across backends (optional)
)
for i, cell in enumerate(self._dpage.textline_cells):
cell_bbox = (
cell.rect.to_bounding_box()
.to_top_left_origin(page_height=page_size.height)
.scaled(scale)
)
overlap_frac = cell_bbox.intersection_over_self(bbox)
if overlap_frac > 0.5:
if len(text_piece) > 0:
text_piece += " "
text_piece += cell.text
return text_piece | [{"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_get_text_from_rect", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n 
page_backend.unload()\n doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_get_text_from_rect -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 17, "file_lines": 400, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0015 | file_overlap | |
repo_patch/0012 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_turbo_model | _get_whisper_turbo_model | function | null | import logging
from enum import Enum
from pydantic import (
AnyUrl,
)
from docling.datamodel.accelerator_options import AcceleratorDevice
from docling.datamodel.pipeline_options_asr_model import (
# AsrResponseFormat,
# ApiAsrOptions,
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
InlineAsrNativeWhisperOptions,
TransformersModelType,
)
_log = logging.getLogger(__name__)
def _get_whisper_tiny_model():
"""
Get the best Whisper Tiny model for the current hardware.
Automatically selects MLX Whisper Tiny for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Tiny.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_TINY = _get_whisper_tiny_model()
def _get_whisper_small_model():
"""
Get the best Whisper Small model for the current hardware.
Automatically selects MLX Whisper Small for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Small.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_SMALL = _get_whisper_small_model()
def _get_whisper_medium_model():
"""
Get the best Whisper Medium model for the current hardware.
Automatically selects MLX Whisper Medium for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Medium.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_MEDIUM = _get_whisper_medium_model()
def _get_whisper_base_model():
"""
Get the best Whisper Base model for the current hardware.
Automatically selects MLX Whisper Base for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Base.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_BASE = _get_whisper_base_model()
def _get_whisper_large_model():
"""
Get the best Whisper Large model for the current hardware.
Automatically selects MLX Whisper Large for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Large.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Create the model instance
WHISPER_LARGE = _get_whisper_large_model()
def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
# TODO: Implement this function
# Create the model instance
WHISPER_TURBO = _get_whisper_turbo_model()
# Explicit MLX Whisper model options for users who want to force MLX usage
WHISPER_TINY_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_SMALL_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-small-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_MEDIUM_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-medium-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_BASE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-base-mlx",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_LARGE_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-large-mlx-8bit",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
WHISPER_TURBO_MLX = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
# Explicit Native Whisper model options for users who want to force native usage
WHISPER_TINY_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="tiny",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_SMALL_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="small",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_MEDIUM_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="medium",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_BASE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="base",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_LARGE_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="large",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
WHISPER_TURBO_NATIVE = InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
)
# Note: The main WHISPER_* models (WHISPER_TURBO, WHISPER_BASE, etc.) automatically
# select the best implementation (MLX on Apple Silicon, Native elsewhere).
# Use the explicit _MLX or _NATIVE variants if you need to force a specific implementation.
class AsrModelType(str, Enum):
# Auto-selecting models (choose best implementation for hardware)
WHISPER_TINY = "whisper_tiny"
WHISPER_SMALL = "whisper_small"
WHISPER_MEDIUM = "whisper_medium"
WHISPER_BASE = "whisper_base"
WHISPER_LARGE = "whisper_large"
WHISPER_TURBO = "whisper_turbo"
# Explicit MLX models (force MLX implementation)
WHISPER_TINY_MLX = "whisper_tiny_mlx"
WHISPER_SMALL_MLX = "whisper_small_mlx"
WHISPER_MEDIUM_MLX = "whisper_medium_mlx"
WHISPER_BASE_MLX = "whisper_base_mlx"
WHISPER_LARGE_MLX = "whisper_large_mlx"
WHISPER_TURBO_MLX = "whisper_turbo_mlx"
# Explicit Native models (force native implementation)
WHISPER_TINY_NATIVE = "whisper_tiny_native"
WHISPER_SMALL_NATIVE = "whisper_small_native"
WHISPER_MEDIUM_NATIVE = "whisper_medium_native"
WHISPER_BASE_NATIVE = "whisper_base_native"
WHISPER_LARGE_NATIVE = "whisper_large_native"
WHISPER_TURBO_NATIVE = "whisper_turbo_native" | def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon) | Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo. | try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | def _get_whisper_turbo_model():
"""
Get the best Whisper Turbo model for the current hardware.
Automatically selects MLX Whisper Turbo for Apple Silicon (MPS) if available,
otherwise falls back to native Whisper Turbo.
"""
# Check if MPS is available (Apple Silicon)
try:
import torch
has_mps = torch.backends.mps.is_built() and torch.backends.mps.is_available()
except ImportError:
has_mps = False
# Check if mlx-whisper is available
try:
import mlx_whisper # type: ignore
has_mlx_whisper = True
except ImportError:
has_mlx_whisper = False
# Use MLX Whisper if both MPS and mlx-whisper are available
if has_mps and has_mlx_whisper:
return InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-turbo",
inference_framework=InferenceAsrFramework.MLX,
language="en",
task="transcribe",
word_timestamps=True,
no_speech_threshold=0.6,
logprob_threshold=-1.0,
compression_ratio_threshold=2.4,
)
else:
return InlineAsrNativeWhisperOptions(
repo_id="turbo",
inference_framework=InferenceAsrFramework.WHISPER,
verbose=True,
timestamps=True,
word_timestamps=True,
temperature=0.0,
max_new_tokens=256,
max_time_chunk=30.0,
) | [{"test_file": "tests/test_asr_mlx_whisper.py", "test_function": "TestMlxWhisperIntegration.test_model_selectors_mlx_and_native_paths", "test_content": "\"\"\"\nTest MLX Whisper integration for Apple Silicon ASR pipeline.\n\"\"\"\n\nimport sys\nfrom pathlib import Path\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions\nfrom docling.datamodel.asr_model_specs import (\n WHISPER_BASE,\n WHISPER_BASE_MLX,\n WHISPER_LARGE,\n WHISPER_LARGE_MLX,\n WHISPER_MEDIUM,\n WHISPER_SMALL,\n WHISPER_TINY,\n WHISPER_TURBO,\n)\nfrom docling.datamodel.pipeline_options import AsrPipelineOptions\nfrom docling.datamodel.pipeline_options_asr_model import (\n InferenceAsrFramework,\n InlineAsrMlxWhisperOptions,\n)\nfrom docling.pipeline.asr_pipeline import AsrPipeline, _MlxWhisperModel\n\n\nclass TestMlxWhisperIntegration:\n \"\"\"Test MLX Whisper model integration.\"\"\"\n\n def test_mlx_whisper_options_creation(self):\n \"\"\"Test that MLX Whisper options are created correctly.\"\"\"\n options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n )\n\n assert options.inference_framework == InferenceAsrFramework.MLX\n assert options.repo_id == \"mlx-community/whisper-tiny-mlx\"\n assert options.language == \"en\"\n assert options.task == \"transcribe\"\n assert options.word_timestamps is True\n assert AcceleratorDevice.MPS in options.supported_devices\n\n def test_whisper_models_auto_select_mlx(self):\n \"\"\"Test that Whisper models automatically select MLX when MPS and mlx-whisper are available.\"\"\"\n # This test verifies that the models are correctly configured\n # In a real Apple Silicon environment with mlx-whisper installed,\n # these models would automatically use MLX\n\n # Check that the models exist and have the correct structure\n assert hasattr(WHISPER_TURBO, \"inference_framework\")\n assert 
hasattr(WHISPER_TURBO, \"repo_id\")\n\n assert hasattr(WHISPER_BASE, \"inference_framework\")\n assert hasattr(WHISPER_BASE, \"repo_id\")\n\n assert hasattr(WHISPER_SMALL, \"inference_framework\")\n assert hasattr(WHISPER_SMALL, \"repo_id\")\n\n def test_explicit_mlx_models_shape(self):\n \"\"\"Explicit MLX options should have MLX framework and valid repos.\"\"\"\n assert WHISPER_BASE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_LARGE_MLX.inference_framework.name == \"MLX\"\n assert WHISPER_BASE_MLX.repo_id.startswith(\"mlx-community/\")\n\n def test_model_selectors_mlx_and_native_paths(self, monkeypatch):\n \"\"\"Cover MLX/native selection branches in asr_model_specs getters.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Force MLX path\n class _Mps:\n def is_built(self):\n return True\n\n def is_available(self):\n return True\n\n class _Torch:\n class backends:\n mps = _Mps()\n\n monkeypatch.setitem(sys.modules, \"torch\", _Torch())\n monkeypatch.setitem(sys.modules, \"mlx_whisper\", object())\n\n m_tiny = specs._get_whisper_tiny_model()\n m_small = specs._get_whisper_small_model()\n m_base = specs._get_whisper_base_model()\n m_medium = specs._get_whisper_medium_model()\n m_large = specs._get_whisper_large_model()\n m_turbo = specs._get_whisper_turbo_model()\n assert (\n m_tiny.inference_framework == InferenceAsrFramework.MLX\n and m_tiny.repo_id.startswith(\"mlx-community/whisper-tiny\")\n )\n assert (\n m_small.inference_framework == InferenceAsrFramework.MLX\n and m_small.repo_id.startswith(\"mlx-community/whisper-small\")\n )\n assert (\n m_base.inference_framework == InferenceAsrFramework.MLX\n and m_base.repo_id.startswith(\"mlx-community/whisper-base\")\n )\n assert (\n m_medium.inference_framework == InferenceAsrFramework.MLX\n and \"medium\" in m_medium.repo_id\n )\n assert (\n m_large.inference_framework == InferenceAsrFramework.MLX\n and \"large\" in m_large.repo_id\n )\n assert (\n m_turbo.inference_framework == 
InferenceAsrFramework.MLX\n and m_turbo.repo_id.endswith(\"whisper-turbo\")\n )\n\n # Force native path (no mlx or no mps)\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n n_tiny = specs._get_whisper_tiny_model()\n n_small = specs._get_whisper_small_model()\n n_base = specs._get_whisper_base_model()\n n_medium = specs._get_whisper_medium_model()\n n_large = specs._get_whisper_large_model()\n n_turbo = specs._get_whisper_turbo_model()\n assert (\n n_tiny.inference_framework == InferenceAsrFramework.WHISPER\n and n_tiny.repo_id == \"tiny\"\n )\n assert (\n n_small.inference_framework == InferenceAsrFramework.WHISPER\n and n_small.repo_id == \"small\"\n )\n assert (\n n_base.inference_framework == InferenceAsrFramework.WHISPER\n and n_base.repo_id == \"base\"\n )\n assert (\n n_medium.inference_framework == InferenceAsrFramework.WHISPER\n and n_medium.repo_id == \"medium\"\n )\n assert (\n n_large.inference_framework == InferenceAsrFramework.WHISPER\n and n_large.repo_id == \"large\"\n )\n assert (\n n_turbo.inference_framework == InferenceAsrFramework.WHISPER\n and n_turbo.repo_id == \"turbo\"\n )\n\n def test_selector_import_errors_force_native(self, monkeypatch):\n \"\"\"If torch import fails, selector must return native.\"\"\"\n from docling.datamodel import asr_model_specs as specs\n\n # Simulate environment where MPS is unavailable and mlx_whisper missing\n class _MpsOff:\n def is_built(self):\n return False\n\n def is_available(self):\n return False\n\n class _TorchOff:\n class backends:\n mps = _MpsOff()\n\n monkeypatch.setitem(sys.modules, \"torch\", _TorchOff())\n if \"mlx_whisper\" in sys.modules:\n del sys.modules[\"mlx_whisper\"]\n\n model = specs._get_whisper_base_model()\n assert model.inference_framework == 
InferenceAsrFramework.WHISPER\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_model_initialization(self, mock_import):\n \"\"\"Test MLX Whisper model initialization.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n assert model.enabled is True\n assert model.model_path == \"mlx-community/whisper-tiny-mlx\"\n assert model.language == \"en\"\n assert model.task == \"transcribe\"\n assert model.word_timestamps is True\n\n def test_mlx_whisper_model_import_error(self):\n \"\"\"Test that ImportError is raised when mlx-whisper is not available.\"\"\"\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n with patch(\n \"builtins.__import__\",\n side_effect=ImportError(\"No module named 'mlx_whisper'\"),\n ):\n with pytest.raises(ImportError, match=\"mlx-whisper is not installed\"):\n _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n @patch(\"builtins.__import__\")\n def test_mlx_whisper_transcribe(self, mock_import):\n \"\"\"Test MLX Whisper transcription method.\"\"\"\n # Mock the mlx_whisper 
module and its transcribe function\n mock_mlx_whisper = Mock()\n mock_import.return_value = mock_mlx_whisper\n\n # Mock the transcribe result\n mock_result = {\n \"segments\": [\n {\n \"start\": 0.0,\n \"end\": 2.5,\n \"text\": \"Hello world\",\n \"words\": [\n {\"start\": 0.0, \"end\": 0.5, \"word\": \"Hello\"},\n {\"start\": 0.5, \"end\": 1.0, \"word\": \"world\"},\n ],\n }\n ]\n }\n mock_mlx_whisper.transcribe.return_value = mock_result\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n model = _MlxWhisperModel(\n enabled=True,\n artifacts_path=None,\n accelerator_options=accelerator_options,\n asr_options=asr_options,\n )\n\n # Test transcription\n audio_path = Path(\"test_audio.wav\")\n result = model.transcribe(audio_path)\n\n # Verify the result\n assert len(result) == 1\n assert result[0].start_time == 0.0\n assert result[0].end_time == 2.5\n assert result[0].text == \"Hello world\"\n assert len(result[0].words) == 2\n assert result[0].words[0].text == \"Hello\"\n assert result[0].words[1].text == \"world\"\n\n # Verify mlx_whisper.transcribe was called with correct parameters\n mock_mlx_whisper.transcribe.assert_called_once_with(\n str(audio_path),\n path_or_hf_repo=\"mlx-community/whisper-tiny-mlx\",\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n\n @patch(\"builtins.__import__\")\n def test_asr_pipeline_with_mlx_whisper(self, mock_import):\n \"\"\"Test that AsrPipeline can be initialized with MLX Whisper options.\"\"\"\n # Mock the mlx_whisper import\n mock_mlx_whisper = Mock()\n mock_import.return_value = 
mock_mlx_whisper\n\n accelerator_options = AcceleratorOptions(device=AcceleratorDevice.MPS)\n asr_options = InlineAsrMlxWhisperOptions(\n repo_id=\"mlx-community/whisper-tiny-mlx\",\n inference_framework=InferenceAsrFramework.MLX,\n language=\"en\",\n task=\"transcribe\",\n word_timestamps=True,\n no_speech_threshold=0.6,\n logprob_threshold=-1.0,\n compression_ratio_threshold=2.4,\n )\n pipeline_options = AsrPipelineOptions(\n asr_options=asr_options,\n accelerator_options=accelerator_options,\n )\n\n pipeline = AsrPipeline(pipeline_options)\n assert isinstance(pipeline._model, _MlxWhisperModel)\n assert pipeline._model.model_path == \"mlx-community/whisper-tiny-mlx\"\n", "framework": "pytest", "test_command": "pytest tests/test_asr_mlx_whisper.py::TestMlxWhisperIntegration::test_model_selectors_mlx_and_native_paths -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0016 | clean |
repo_patch/0013 | docling-project/docling | docling/backend/mets_gbs_backend.py | get_page_image | MetsGbsPageBackend.get_page_image | method | MetsGbsPageBackend | """Backend for GBS Google Books schema."""
import logging
import tarfile
from collections.abc import Iterable
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
from docling_core.types.doc import BoundingBox, CoordOrigin, Size
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from lxml import etree
from PIL import Image
from PIL.Image import Image as PILImage
from docling.backend.abstract_backend import PaginatedDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.base_models import InputFormat
if TYPE_CHECKING:
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
def _get_pdf_page_geometry(
size: Size,
) -> PdfPageGeometry:
boundary_type: PdfPageBoundaryType = PdfPageBoundaryType.CROP_BOX
bbox_tuple = (0, 0, size.width, size.height)
bbox = BoundingBox.from_tuple(bbox_tuple, CoordOrigin.TOPLEFT)
return PdfPageGeometry(
angle=0.0,
rect=BoundingRectangle.from_bounding_box(bbox),
boundary_type=boundary_type,
art_bbox=bbox,
bleed_bbox=bbox,
crop_bbox=bbox,
media_bbox=bbox,
trim_bbox=bbox,
)
class MetsGbsPageBackend(PdfPageBackend):
def __init__(self, parsed_page: SegmentedPdfPage, page_im: PILImage):
self._im = page_im
self._dpage = parsed_page
self.valid = parsed_page is not None
def is_valid(self) -> bool:
return self.valid
def get_text_in_rect(self, bbox: BoundingBox) -> str:
# Find intersecting cells on the page
text_piece = ""
page_size = self.get_size()
scale = (
1 # FIX - Replace with param in get_text_in_rect across backends (optional)
)
for i, cell in enumerate(self._dpage.textline_cells):
cell_bbox = (
cell.rect.to_bounding_box()
.to_top_left_origin(page_height=page_size.height)
.scaled(scale)
)
overlap_frac = cell_bbox.intersection_over_self(bbox)
if overlap_frac > 0.5:
if len(text_piece) > 0:
text_piece += " "
text_piece += cell.text
return text_piece
def get_segmented_page(self) -> Optional[SegmentedPdfPage]:
return self._dpage
def get_text_cells(self) -> Iterable[TextCell]:
return self._dpage.textline_cells
def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
AREA_THRESHOLD = 0 # 32 * 32
images = self._dpage.bitmap_resources
for img in images:
cropbox = img.rect.to_bounding_box().to_top_left_origin(
self.get_size().height
)
if cropbox.area() > AREA_THRESHOLD:
cropbox = cropbox.scaled(scale=scale)
yield cropbox
def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
# TODO: Implement this function
def get_size(self) -> Size:
return Size(
width=self._dpage.dimension.width, height=self._dpage.dimension.height
)
def unload(self) -> None:
if hasattr(self, "_im"):
delattr(self, "_im")
if hasattr(self, "_dpage"):
delattr(self, "_dpage")
class _UseType(str, Enum):
IMAGE = "image"
OCR = "OCR"
COORD_OCR = "coordOCR"
@dataclass
class _FileInfo:
file_id: str
mimetype: str
path: str
use: _UseType
@dataclass
class _PageFiles:
image: Optional[_FileInfo] = None
ocr: Optional[_FileInfo] = None
coordOCR: Optional[_FileInfo] = None
def _extract_rect(title_str: str) -> Optional[BoundingRectangle]:
"""
Extracts bbox from title string like 'bbox 279 177 306 214;x_wconf 97'
"""
parts = title_str.split(";")
for part in parts:
part = part.strip()
if part.startswith("bbox "):
try:
coords = part.split()[1:]
rect = BoundingRectangle.from_bounding_box(
bbox=BoundingBox.from_tuple(
tuple(map(int, coords)), origin=CoordOrigin.TOPLEFT
)
)
return rect
except Exception:
return None
return None
def _extract_confidence(title_str) -> float:
"""Extracts x_wconf (OCR confidence) value from title string."""
for part in title_str.split(";"):
part = part.strip()
if part.startswith("x_wconf"):
try:
return float(part.split()[1]) / 100.0
except Exception:
return 1
return 1
class MetsGbsDocumentBackend(PdfDocumentBackend):
def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
super().__init__(in_doc, path_or_stream)
self._tar: tarfile.TarFile = (
tarfile.open(name=self.path_or_stream, mode="r:gz")
if isinstance(self.path_or_stream, Path)
else tarfile.open(fileobj=self.path_or_stream, mode="r:gz")
)
self.root_mets: Optional[etree._Element] = None
self.page_map: Dict[int, _PageFiles] = {}
for member in self._tar.getmembers():
if member.name.endswith(".xml"):
file = self._tar.extractfile(member)
if file is not None:
content = file.read()
self.root_mets = self._validate_mets_xml(content)
if self.root_mets is not None:
break
if self.root_mets is None:
raise RuntimeError(
f"METS GBS backend could not load document {self.document_hash}."
)
ns = {
"mets": "http://www.loc.gov/METS/",
"xlink": "http://www.w3.org/1999/xlink",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"gbs": "http://books.google.com/gbs",
"premis": "info:lc/xmlns/premis-v2",
"marc": "http://www.loc.gov/MARC21/slim",
}
file_info_by_id: Dict[str, _FileInfo] = {}
for filegrp in self.root_mets.xpath(".//mets:fileGrp", namespaces=ns):
use_raw = filegrp.get("USE")
try:
use = _UseType(use_raw)
except ValueError:
continue # Ignore unknown USE types
for file_elem in filegrp.xpath("./mets:file", namespaces=ns):
file_id = file_elem.get("ID")
mimetype = file_elem.get("MIMETYPE")
flocat_elem = file_elem.find("mets:FLocat", namespaces=ns)
href = (
flocat_elem.get("{http://www.w3.org/1999/xlink}href")
if flocat_elem is not None
else None
)
if href is None:
continue
file_info_by_id[file_id] = _FileInfo(
file_id=file_id, mimetype=mimetype, path=href, use=use
)
USE_TO_ATTR = {
_UseType.IMAGE: "image",
_UseType.OCR: "ocr",
_UseType.COORD_OCR: "coordOCR",
}
for div in self.root_mets.xpath('.//mets:div[@TYPE="page"]', namespaces=ns):
order_str = div.get("ORDER")
if not order_str:
continue
try:
page_no = int(order_str) - 1 # make 0-index pages
except ValueError:
continue
page_files = _PageFiles()
for fptr in div.xpath("./mets:fptr", namespaces=ns):
file_id = fptr.get("FILEID")
file_info = file_info_by_id.get(file_id)
if file_info:
attr = USE_TO_ATTR.get(file_info.use)
if attr:
setattr(page_files, attr, file_info)
self.page_map[page_no] = page_files
def _validate_mets_xml(self, xml_string) -> Optional[etree._Element]:
root: etree._Element = etree.fromstring(xml_string)
if (
root.tag == "{http://www.loc.gov/METS/}mets"
and root.get("PROFILE") == "gbs"
):
return root
_log.warning(f"The root element is not <mets:mets> with PROFILE='gbs': {root}")
return None
def _parse_page(self, page_no: int) -> Tuple[SegmentedPdfPage, PILImage]:
# TODO: use better fallbacks...
image_info = self.page_map[page_no].image
assert image_info is not None
ocr_info = self.page_map[page_no].coordOCR
assert ocr_info is not None
image_file = self._tar.extractfile(image_info.path)
assert image_file is not None
buf = BytesIO(image_file.read())
im: PILImage = Image.open(buf)
ocr_file = self._tar.extractfile(ocr_info.path)
assert ocr_file is not None
ocr_content = ocr_file.read()
parser = etree.HTMLParser()
ocr_root: etree._Element = etree.fromstring(ocr_content, parser=parser)
line_cells: List[TextCell] = []
word_cells: List[TextCell] = []
page_div = ocr_root.xpath("//div[@class='ocr_page']")
size = Size(width=im.size[0], height=im.size[1])
if page_div:
title = page_div[0].attrib.get("title", "")
rect = _extract_rect(title)
if rect:
size = Size(width=rect.width, height=rect.height)
else:
_log.error(f"Could not find ocr_page for page {page_no}")
im = im.resize(size=(round(size.width), round(size.height)))
im = im.convert("RGB")
# Extract all ocrx_word spans
for ix, word in enumerate(ocr_root.xpath("//span[@class='ocrx_word']")):
text = "".join(word.itertext()).strip()
title = word.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
word_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
# Extract all ocr_line spans
# line: etree._Element
for ix, line in enumerate(ocr_root.xpath("//span[@class='ocr_line']")):
text = "".join(line.itertext()).strip()
title = line.attrib.get("title", "")
rect = _extract_rect(title)
conf = _extract_confidence(title)
if rect:
line_cells.append(
TextCell(
index=ix,
text=text,
orig=text,
rect=rect,
from_ocr=True,
confidence=conf,
)
)
page = SegmentedPdfPage(
dimension=_get_pdf_page_geometry(size),
textline_cells=line_cells,
char_cells=[],
word_cells=word_cells,
has_textlines=True,
has_words=True,
has_chars=False,
)
return page, im
def page_count(self) -> int:
return len(self.page_map)
def load_page(self, page_no: int) -> MetsGbsPageBackend:
# TODO: is this thread-safe?
page, im = self._parse_page(page_no)
return MetsGbsPageBackend(parsed_page=page, page_im=im)
def is_valid(self) -> bool:
return self.root_mets is not None and self.page_count() > 0
@classmethod
def supported_formats(cls) -> Set[InputFormat]:
return {InputFormat.METS_GBS}
@classmethod
def supports_pagination(cls) -> bool:
return True
def unload(self) -> None:
super().unload()
self._tar.close() | def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image: | page_size = self.get_size()
assert (
page_size.width == self._im.size[0] and page_size.height == self._im.size[1]
)
if not cropbox:
cropbox = BoundingBox(
l=0,
r=page_size.width,
t=0,
b=page_size.height,
coord_origin=CoordOrigin.TOPLEFT,
)
image = self._im.resize(
size=(round(page_size.width * scale), round(page_size.height * scale))
).crop(cropbox.scaled(scale=scale).as_tuple())
return image | def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
page_size = self.get_size()
assert (
page_size.width == self._im.size[0] and page_size.height == self._im.size[1]
)
if not cropbox:
cropbox = BoundingBox(
l=0,
r=page_size.width,
t=0,
b=page_size.height,
coord_origin=CoordOrigin.TOPLEFT,
)
image = self._im.resize(
size=(round(page_size.width * scale), round(page_size.height * scale))
).crop(cropbox.scaled(scale=scale).as_tuple())
return image | [{"test_file": "tests/test_backend_mets_gbs.py", "test_function": "test_crop_page_image", "test_content": "from pathlib import Path\n\nimport pytest\n\nfrom docling.backend.mets_gbs_backend import MetsGbsDocumentBackend, MetsGbsPageBackend\nfrom docling.datamodel.base_models import BoundingBox, InputFormat\nfrom docling.datamodel.document import InputDocument\n\n\n@pytest.fixture\ndef test_doc_path():\n return Path(\"tests/data/mets_gbs/32044009881525_select.tar.gz\")\n\n\ndef _get_backend(pdf_doc):\n in_doc = InputDocument(\n path_or_stream=pdf_doc,\n format=InputFormat.METS_GBS,\n backend=MetsGbsDocumentBackend,\n )\n\n doc_backend = in_doc._backend\n return doc_backend\n\n\ndef test_process_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n\n for page_index in range(doc_backend.page_count()):\n page_backend: MetsGbsPageBackend = doc_backend.load_page(page_index)\n list(page_backend.get_text_cells())\n\n # Clean up page backend after each iteration\n page_backend.unload()\n\n # Explicitly clean up document backend to prevent race conditions in CI\n doc_backend.unload()\n\n\ndef test_get_text_from_rect(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n # Get the title text of the DocLayNet paper\n textpiece = page_backend.get_text_in_rect(\n bbox=BoundingBox(l=275, t=263, r=1388, b=311)\n )\n ref = \"recently become prevalent that he who speaks\"\n\n assert textpiece.strip() == ref\n\n # Explicitly clean up resources\n page_backend.unload()\n doc_backend.unload()\n\n\ndef test_crop_page_image(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n page_backend: MetsGbsPageBackend = doc_backend.load_page(0)\n\n page_backend.get_page_image(\n scale=2, cropbox=BoundingBox(l=270, t=587, r=1385, b=1995)\n )\n # im.show()\n\n # Explicitly clean up resources\n page_backend.unload()\n 
doc_backend.unload()\n\n\ndef test_num_pages(test_doc_path):\n doc_backend: MetsGbsDocumentBackend = _get_backend(test_doc_path)\n assert doc_backend.is_valid()\n assert doc_backend.page_count() == 3\n\n # Explicitly clean up resources to prevent race conditions in CI\n doc_backend.unload()\n", "framework": "pytest", "test_command": "pytest tests/test_backend_mets_gbs.py::test_crop_page_image -xvs"}] | {"repo_url": "https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 16, "file_lines": 400, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0017 | file_overlap | |
repo_patch/0014 | docling-project/docling | docling/backend/image_backend.py | get_page_image | _ImagePageBackend.get_page_image | method | _ImagePageBackend | import logging
from io import BytesIO
from pathlib import Path
from typing import Iterable, List, Optional, Union
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling_core.types.doc.page import (
BoundingRectangle,
PdfPageBoundaryType,
PdfPageGeometry,
SegmentedPdfPage,
TextCell,
)
from PIL import Image
from docling.backend.abstract_backend import AbstractDocumentBackend
from docling.backend.pdf_backend import PdfDocumentBackend, PdfPageBackend
from docling.datamodel.backend_options import PdfBackendOptions
from docling.datamodel.base_models import InputFormat, Size
from docling.datamodel.document import InputDocument
_log = logging.getLogger(__name__)
class _ImagePageBackend(PdfPageBackend):
def __init__(self, image: Image.Image):
self._image: Optional[Image.Image] = image
self.valid: bool = self._image is not None
def is_valid(self) -> bool:
return self.valid
def get_text_in_rect(self, bbox: BoundingBox) -> str:
# No text extraction from raw images without OCR
return ""
def get_segmented_page(self) -> SegmentedPdfPage:
# Return empty segmented page with proper dimensions for raw images
assert self._image is not None
page_size = self.get_size()
bbox = BoundingBox(
l=0.0,
t=0.0,
r=float(page_size.width),
b=float(page_size.height),
coord_origin=CoordOrigin.BOTTOMLEFT,
)
dimension = PdfPageGeometry(
angle=0.0,
rect=BoundingRectangle.from_bounding_box(bbox),
boundary_type=PdfPageBoundaryType.CROP_BOX,
art_bbox=bbox,
bleed_bbox=bbox,
crop_bbox=bbox,
media_bbox=bbox,
trim_bbox=bbox,
)
return SegmentedPdfPage(
dimension=dimension,
char_cells=[],
word_cells=[],
textline_cells=[],
has_chars=False,
has_words=False,
has_lines=False,
)
def get_text_cells(self) -> Iterable[TextCell]:
# No text cells on raw images
return []
def get_bitmap_rects(self, scale: float = 1) -> Iterable[BoundingBox]:
# For raw images, the entire page is a bitmap
assert self._image is not None
page_size = self.get_size()
full_page_bbox = BoundingBox(
l=0.0,
t=0.0,
r=float(page_size.width),
b=float(page_size.height),
coord_origin=CoordOrigin.TOPLEFT,
)
if scale != 1:
full_page_bbox = full_page_bbox.scaled(scale=scale)
yield full_page_bbox
def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
# TODO: Implement this function
def get_size(self) -> Size:
assert self._image is not None
return Size(width=self._image.width, height=self._image.height)
def unload(self):
# Help GC and free memory
self._image = None
class ImageDocumentBackend(PdfDocumentBackend):
"""Image-native backend that bypasses pypdfium2.
Notes:
- Subclasses PdfDocumentBackend to satisfy pipeline type checks.
- Intentionally avoids calling PdfDocumentBackend.__init__ to skip
the image→PDF conversion and any pypdfium2 usage.
- Handles multi-page TIFF by extracting frames eagerly to separate
Image objects to keep thread-safety when pages process in parallel.
"""
def __init__(
self,
in_doc: InputDocument,
path_or_stream: Union[BytesIO, Path],
options: PdfBackendOptions = PdfBackendOptions(),
):
# Bypass PdfDocumentBackend.__init__ to avoid image→PDF conversion
AbstractDocumentBackend.__init__(self, in_doc, path_or_stream, options)
self.options: PdfBackendOptions = options
if self.input_format not in {InputFormat.IMAGE}:
raise RuntimeError(
f"Incompatible file format {self.input_format} was passed to ImageDocumentBackend."
)
# Load frames eagerly for thread-safety across pages
self._frames: List[Image.Image] = []
try:
img = Image.open(self.path_or_stream) # type: ignore[arg-type]
# Handle multi-frame and single-frame images
# - multiframe formats: TIFF, GIF, ICO
# - singleframe formats: JPEG (.jpg, .jpeg), PNG (.png), BMP, WEBP (unless animated), HEIC
frame_count = getattr(img, "n_frames", 1)
if frame_count > 1:
for i in range(frame_count):
img.seek(i)
self._frames.append(img.copy().convert("RGB"))
else:
self._frames.append(img.convert("RGB"))
except Exception as e:
raise RuntimeError(f"Could not load image for document {self.file}") from e
def is_valid(self) -> bool:
return len(self._frames) > 0
def page_count(self) -> int:
return len(self._frames)
def load_page(self, page_no: int) -> _ImagePageBackend:
if not (0 <= page_no < len(self._frames)):
raise IndexError(f"Page index out of range: {page_no}")
return _ImagePageBackend(self._frames[page_no])
@classmethod
def supported_formats(cls) -> set[InputFormat]:
# Only IMAGE here; PDF handling remains in PDF-oriented backends
return {InputFormat.IMAGE}
@classmethod
def supports_pagination(cls) -> bool:
return True
def unload(self):
super().unload()
self._frames = [] | def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image: | assert self._image is not None
img = self._image
if cropbox is not None:
# Expected cropbox comes in TOPLEFT coords in our pipeline
if cropbox.coord_origin != CoordOrigin.TOPLEFT:
# Convert to TOPLEFT relative to current image height
cropbox = cropbox.to_top_left_origin(img.height)
left, top, right, bottom = cropbox.as_tuple()
left = max(0, round(left))
top = max(0, round(top))
right = min(img.width, round(right))
bottom = min(img.height, round(bottom))
img = img.crop((left, top, right, bottom))
if scale != 1:
new_w = max(1, round(img.width * scale))
new_h = max(1, round(img.height * scale))
img = img.resize((new_w, new_h))
return img | def get_page_image(
self, scale: float = 1, cropbox: Optional[BoundingBox] = None
) -> Image.Image:
assert self._image is not None
img = self._image
if cropbox is not None:
# Expected cropbox comes in TOPLEFT coords in our pipeline
if cropbox.coord_origin != CoordOrigin.TOPLEFT:
# Convert to TOPLEFT relative to current image height
cropbox = cropbox.to_top_left_origin(img.height)
left, top, right, bottom = cropbox.as_tuple()
left = max(0, round(left))
top = max(0, round(top))
right = min(img.width, round(right))
bottom = min(img.height, round(bottom))
img = img.crop((left, top, right, bottom))
if scale != 1:
new_w = max(1, round(img.width * scale))
new_h = max(1, round(img.height * scale))
img = img.resize((new_w, new_h))
return img | [{"test_file": "tests/test_backend_image_native.py", "test_function": "test_get_page_image_full", "test_content": "from io import BytesIO\nfrom pathlib import Path\n\nimport pytest\nfrom docling_core.types.doc import BoundingBox, CoordOrigin\nfrom PIL import Image\n\nfrom docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend\nfrom docling.datamodel.base_models import DocumentStream, InputFormat\nfrom docling.datamodel.document import InputDocument, _DocumentConversionInput\nfrom docling.document_converter import DocumentConverter, ImageFormatOption\nfrom docling.document_extractor import DocumentExtractor\n\n\ndef _make_png_stream(\n width: int = 64, height: int = 48, color=(123, 45, 67)\n) -> DocumentStream:\n img = Image.new(\"RGB\", (width, height), color)\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n buf.seek(0)\n return DocumentStream(name=\"test.png\", stream=buf)\n\n\ndef _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:\n frames = [\n Image.new(\"RGB\", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))\n for i in range(num_pages)\n ]\n buf = BytesIO()\n frames[0].save(buf, format=\"TIFF\", save_all=True, append_images=frames[1:])\n buf.seek(0)\n return DocumentStream(name=\"test.tiff\", stream=buf)\n\n\ndef test_docs_builder_uses_image_backend_for_image_stream():\n stream = _make_png_stream()\n conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])\n # Provide format options mapping that includes IMAGE -> ImageFormatOption (which carries ImageDocumentBackend)\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n docs = list(conv_input.docs(format_options))\n assert len(docs) == 1\n in_doc = docs[0]\n assert in_doc.format == InputFormat.IMAGE\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 1\n\n\ndef test_docs_builder_multipage_tiff_counts_frames():\n stream = _make_multipage_tiff_stream(num_pages=4)\n conv_input = 
_DocumentConversionInput(path_or_stream_iterator=[stream])\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n in_doc = next(conv_input.docs(format_options))\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 4\n\n\ndef test_converter_default_maps_image_to_image_backend():\n converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])\n backend_cls = converter.format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef test_extractor_default_maps_image_to_image_backend():\n extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])\n backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef _get_backend_from_stream(stream: DocumentStream):\n \"\"\"Helper to create InputDocument with ImageDocumentBackend from a stream.\"\"\"\n in_doc = InputDocument(\n path_or_stream=stream.stream,\n format=InputFormat.IMAGE,\n backend=ImageDocumentBackend,\n filename=stream.name,\n )\n return in_doc._backend\n\n\ndef test_num_pages_single():\n \"\"\"Test page count for single-page image.\"\"\"\n stream = _make_png_stream(width=100, height=80)\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 1\n\n\ndef test_num_pages_multipage():\n \"\"\"Test page count for multi-page TIFF.\"\"\"\n stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 5\n\n\ndef test_get_size():\n \"\"\"Test getting page size.\"\"\"\n width, height = 120, 90\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n size = page_backend.get_size()\n assert size.width == width\n assert size.height == height\n\n\ndef test_get_page_image_full():\n \"\"\"Test getting full page image.\"\"\"\n width, height = 
100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image()\n assert img.width == width\n assert img.height == height\n\n\ndef test_get_page_image_scaled():\n \"\"\"Test getting scaled page image.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image(scale=scale)\n assert img.width == round(width * scale)\n assert img.height == round(height * scale)\n\n\ndef test_crop_page_image():\n \"\"\"Test cropping page image.\"\"\"\n width, height = 200, 150\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n # Crop a region from the center\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(cropbox=cropbox)\n assert img.width == 100 # 150 - 50\n assert img.height == 90 # 120 - 30\n\n\ndef test_crop_page_image_scaled():\n \"\"\"Test cropping and scaling page image.\"\"\"\n width, height = 200, 150\n scale = 0.5\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(scale=scale, cropbox=cropbox)\n assert img.width == round(100 * scale) # cropped width * scale\n assert img.height == round(90 * scale) # cropped height * scale\n\n\ndef test_get_bitmap_rects():\n \"\"\"Test getting bitmap rects - should return full page rectangle.\"\"\"\n width, height = 100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend 
= _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects())\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width)\n assert bbox.b == float(height)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_bitmap_rects_scaled():\n \"\"\"Test getting bitmap rects with scaling.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects(scale=scale))\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width * scale)\n assert bbox.b == float(height * scale)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_text_in_rect():\n \"\"\"Test that get_text_in_rect returns empty string for images (no OCR).\"\"\"\n stream = _make_png_stream()\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)\n text = page_backend.get_text_in_rect(bbox)\n assert text == \"\"\n\n\ndef test_multipage_access():\n \"\"\"Test accessing different pages in multi-page image.\"\"\"\n num_pages = 4\n stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == num_pages\n\n # Access each page\n for i in range(num_pages):\n page_backend = doc_backend.load_page(i)\n assert page_backend.is_valid()\n size = page_backend.get_size()\n assert size.width == 64\n assert size.height == 64\n", "framework": "pytest", "test_command": "pytest tests/test_backend_image_native.py::test_get_page_image_full -xvs"}, {"test_file": 
"tests/test_backend_image_native.py", "test_function": "test_get_page_image_scaled", "test_content": "from io import BytesIO\nfrom pathlib import Path\n\nimport pytest\nfrom docling_core.types.doc import BoundingBox, CoordOrigin\nfrom PIL import Image\n\nfrom docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend\nfrom docling.datamodel.base_models import DocumentStream, InputFormat\nfrom docling.datamodel.document import InputDocument, _DocumentConversionInput\nfrom docling.document_converter import DocumentConverter, ImageFormatOption\nfrom docling.document_extractor import DocumentExtractor\n\n\ndef _make_png_stream(\n width: int = 64, height: int = 48, color=(123, 45, 67)\n) -> DocumentStream:\n img = Image.new(\"RGB\", (width, height), color)\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n buf.seek(0)\n return DocumentStream(name=\"test.png\", stream=buf)\n\n\ndef _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:\n frames = [\n Image.new(\"RGB\", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))\n for i in range(num_pages)\n ]\n buf = BytesIO()\n frames[0].save(buf, format=\"TIFF\", save_all=True, append_images=frames[1:])\n buf.seek(0)\n return DocumentStream(name=\"test.tiff\", stream=buf)\n\n\ndef test_docs_builder_uses_image_backend_for_image_stream():\n stream = _make_png_stream()\n conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])\n # Provide format options mapping that includes IMAGE -> ImageFormatOption (which carries ImageDocumentBackend)\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n docs = list(conv_input.docs(format_options))\n assert len(docs) == 1\n in_doc = docs[0]\n assert in_doc.format == InputFormat.IMAGE\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 1\n\n\ndef test_docs_builder_multipage_tiff_counts_frames():\n stream = _make_multipage_tiff_stream(num_pages=4)\n conv_input = 
_DocumentConversionInput(path_or_stream_iterator=[stream])\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n in_doc = next(conv_input.docs(format_options))\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 4\n\n\ndef test_converter_default_maps_image_to_image_backend():\n converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])\n backend_cls = converter.format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef test_extractor_default_maps_image_to_image_backend():\n extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])\n backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef _get_backend_from_stream(stream: DocumentStream):\n \"\"\"Helper to create InputDocument with ImageDocumentBackend from a stream.\"\"\"\n in_doc = InputDocument(\n path_or_stream=stream.stream,\n format=InputFormat.IMAGE,\n backend=ImageDocumentBackend,\n filename=stream.name,\n )\n return in_doc._backend\n\n\ndef test_num_pages_single():\n \"\"\"Test page count for single-page image.\"\"\"\n stream = _make_png_stream(width=100, height=80)\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 1\n\n\ndef test_num_pages_multipage():\n \"\"\"Test page count for multi-page TIFF.\"\"\"\n stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 5\n\n\ndef test_get_size():\n \"\"\"Test getting page size.\"\"\"\n width, height = 120, 90\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n size = page_backend.get_size()\n assert size.width == width\n assert size.height == height\n\n\ndef test_get_page_image_full():\n \"\"\"Test getting full page image.\"\"\"\n width, height = 
100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image()\n assert img.width == width\n assert img.height == height\n\n\ndef test_get_page_image_scaled():\n \"\"\"Test getting scaled page image.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image(scale=scale)\n assert img.width == round(width * scale)\n assert img.height == round(height * scale)\n\n\ndef test_crop_page_image():\n \"\"\"Test cropping page image.\"\"\"\n width, height = 200, 150\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n # Crop a region from the center\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(cropbox=cropbox)\n assert img.width == 100 # 150 - 50\n assert img.height == 90 # 120 - 30\n\n\ndef test_crop_page_image_scaled():\n \"\"\"Test cropping and scaling page image.\"\"\"\n width, height = 200, 150\n scale = 0.5\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(scale=scale, cropbox=cropbox)\n assert img.width == round(100 * scale) # cropped width * scale\n assert img.height == round(90 * scale) # cropped height * scale\n\n\ndef test_get_bitmap_rects():\n \"\"\"Test getting bitmap rects - should return full page rectangle.\"\"\"\n width, height = 100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend 
= _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects())\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width)\n assert bbox.b == float(height)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_bitmap_rects_scaled():\n \"\"\"Test getting bitmap rects with scaling.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects(scale=scale))\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width * scale)\n assert bbox.b == float(height * scale)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_text_in_rect():\n \"\"\"Test that get_text_in_rect returns empty string for images (no OCR).\"\"\"\n stream = _make_png_stream()\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)\n text = page_backend.get_text_in_rect(bbox)\n assert text == \"\"\n\n\ndef test_multipage_access():\n \"\"\"Test accessing different pages in multi-page image.\"\"\"\n num_pages = 4\n stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == num_pages\n\n # Access each page\n for i in range(num_pages):\n page_backend = doc_backend.load_page(i)\n assert page_backend.is_valid()\n size = page_backend.get_size()\n assert size.width == 64\n assert size.height == 64\n", "framework": "pytest", "test_command": "pytest tests/test_backend_image_native.py::test_get_page_image_scaled -xvs"}, {"test_file": 
"tests/test_backend_image_native.py", "test_function": "test_crop_page_image", "test_content": "from io import BytesIO\nfrom pathlib import Path\n\nimport pytest\nfrom docling_core.types.doc import BoundingBox, CoordOrigin\nfrom PIL import Image\n\nfrom docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend\nfrom docling.datamodel.base_models import DocumentStream, InputFormat\nfrom docling.datamodel.document import InputDocument, _DocumentConversionInput\nfrom docling.document_converter import DocumentConverter, ImageFormatOption\nfrom docling.document_extractor import DocumentExtractor\n\n\ndef _make_png_stream(\n width: int = 64, height: int = 48, color=(123, 45, 67)\n) -> DocumentStream:\n img = Image.new(\"RGB\", (width, height), color)\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n buf.seek(0)\n return DocumentStream(name=\"test.png\", stream=buf)\n\n\ndef _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:\n frames = [\n Image.new(\"RGB\", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))\n for i in range(num_pages)\n ]\n buf = BytesIO()\n frames[0].save(buf, format=\"TIFF\", save_all=True, append_images=frames[1:])\n buf.seek(0)\n return DocumentStream(name=\"test.tiff\", stream=buf)\n\n\ndef test_docs_builder_uses_image_backend_for_image_stream():\n stream = _make_png_stream()\n conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])\n # Provide format options mapping that includes IMAGE -> ImageFormatOption (which carries ImageDocumentBackend)\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n docs = list(conv_input.docs(format_options))\n assert len(docs) == 1\n in_doc = docs[0]\n assert in_doc.format == InputFormat.IMAGE\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 1\n\n\ndef test_docs_builder_multipage_tiff_counts_frames():\n stream = _make_multipage_tiff_stream(num_pages=4)\n conv_input = 
_DocumentConversionInput(path_or_stream_iterator=[stream])\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n in_doc = next(conv_input.docs(format_options))\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 4\n\n\ndef test_converter_default_maps_image_to_image_backend():\n converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])\n backend_cls = converter.format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef test_extractor_default_maps_image_to_image_backend():\n extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])\n backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef _get_backend_from_stream(stream: DocumentStream):\n \"\"\"Helper to create InputDocument with ImageDocumentBackend from a stream.\"\"\"\n in_doc = InputDocument(\n path_or_stream=stream.stream,\n format=InputFormat.IMAGE,\n backend=ImageDocumentBackend,\n filename=stream.name,\n )\n return in_doc._backend\n\n\ndef test_num_pages_single():\n \"\"\"Test page count for single-page image.\"\"\"\n stream = _make_png_stream(width=100, height=80)\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 1\n\n\ndef test_num_pages_multipage():\n \"\"\"Test page count for multi-page TIFF.\"\"\"\n stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 5\n\n\ndef test_get_size():\n \"\"\"Test getting page size.\"\"\"\n width, height = 120, 90\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n size = page_backend.get_size()\n assert size.width == width\n assert size.height == height\n\n\ndef test_get_page_image_full():\n \"\"\"Test getting full page image.\"\"\"\n width, height = 
100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image()\n assert img.width == width\n assert img.height == height\n\n\ndef test_get_page_image_scaled():\n \"\"\"Test getting scaled page image.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image(scale=scale)\n assert img.width == round(width * scale)\n assert img.height == round(height * scale)\n\n\ndef test_crop_page_image():\n \"\"\"Test cropping page image.\"\"\"\n width, height = 200, 150\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n # Crop a region from the center\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(cropbox=cropbox)\n assert img.width == 100 # 150 - 50\n assert img.height == 90 # 120 - 30\n\n\ndef test_crop_page_image_scaled():\n \"\"\"Test cropping and scaling page image.\"\"\"\n width, height = 200, 150\n scale = 0.5\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(scale=scale, cropbox=cropbox)\n assert img.width == round(100 * scale) # cropped width * scale\n assert img.height == round(90 * scale) # cropped height * scale\n\n\ndef test_get_bitmap_rects():\n \"\"\"Test getting bitmap rects - should return full page rectangle.\"\"\"\n width, height = 100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend 
= _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects())\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width)\n assert bbox.b == float(height)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_bitmap_rects_scaled():\n \"\"\"Test getting bitmap rects with scaling.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects(scale=scale))\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width * scale)\n assert bbox.b == float(height * scale)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_text_in_rect():\n \"\"\"Test that get_text_in_rect returns empty string for images (no OCR).\"\"\"\n stream = _make_png_stream()\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)\n text = page_backend.get_text_in_rect(bbox)\n assert text == \"\"\n\n\ndef test_multipage_access():\n \"\"\"Test accessing different pages in multi-page image.\"\"\"\n num_pages = 4\n stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == num_pages\n\n # Access each page\n for i in range(num_pages):\n page_backend = doc_backend.load_page(i)\n assert page_backend.is_valid()\n size = page_backend.get_size()\n assert size.width == 64\n assert size.height == 64\n", "framework": "pytest", "test_command": "pytest tests/test_backend_image_native.py::test_crop_page_image -xvs"}, {"test_file": 
"tests/test_backend_image_native.py", "test_function": "test_crop_page_image_scaled", "test_content": "from io import BytesIO\nfrom pathlib import Path\n\nimport pytest\nfrom docling_core.types.doc import BoundingBox, CoordOrigin\nfrom PIL import Image\n\nfrom docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend\nfrom docling.datamodel.base_models import DocumentStream, InputFormat\nfrom docling.datamodel.document import InputDocument, _DocumentConversionInput\nfrom docling.document_converter import DocumentConverter, ImageFormatOption\nfrom docling.document_extractor import DocumentExtractor\n\n\ndef _make_png_stream(\n width: int = 64, height: int = 48, color=(123, 45, 67)\n) -> DocumentStream:\n img = Image.new(\"RGB\", (width, height), color)\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n buf.seek(0)\n return DocumentStream(name=\"test.png\", stream=buf)\n\n\ndef _make_multipage_tiff_stream(num_pages: int = 3, size=(32, 32)) -> DocumentStream:\n frames = [\n Image.new(\"RGB\", size, (i * 10 % 255, i * 20 % 255, i * 30 % 255))\n for i in range(num_pages)\n ]\n buf = BytesIO()\n frames[0].save(buf, format=\"TIFF\", save_all=True, append_images=frames[1:])\n buf.seek(0)\n return DocumentStream(name=\"test.tiff\", stream=buf)\n\n\ndef test_docs_builder_uses_image_backend_for_image_stream():\n stream = _make_png_stream()\n conv_input = _DocumentConversionInput(path_or_stream_iterator=[stream])\n # Provide format options mapping that includes IMAGE -> ImageFormatOption (which carries ImageDocumentBackend)\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n docs = list(conv_input.docs(format_options))\n assert len(docs) == 1\n in_doc = docs[0]\n assert in_doc.format == InputFormat.IMAGE\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 1\n\n\ndef test_docs_builder_multipage_tiff_counts_frames():\n stream = _make_multipage_tiff_stream(num_pages=4)\n conv_input = 
_DocumentConversionInput(path_or_stream_iterator=[stream])\n format_options = {InputFormat.IMAGE: ImageFormatOption()}\n\n in_doc = next(conv_input.docs(format_options))\n assert isinstance(in_doc._backend, ImageDocumentBackend)\n assert in_doc.page_count == 4\n\n\ndef test_converter_default_maps_image_to_image_backend():\n converter = DocumentConverter(allowed_formats=[InputFormat.IMAGE])\n backend_cls = converter.format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef test_extractor_default_maps_image_to_image_backend():\n extractor = DocumentExtractor(allowed_formats=[InputFormat.IMAGE])\n backend_cls = extractor.extraction_format_to_options[InputFormat.IMAGE].backend\n assert backend_cls is ImageDocumentBackend\n\n\ndef _get_backend_from_stream(stream: DocumentStream):\n \"\"\"Helper to create InputDocument with ImageDocumentBackend from a stream.\"\"\"\n in_doc = InputDocument(\n path_or_stream=stream.stream,\n format=InputFormat.IMAGE,\n backend=ImageDocumentBackend,\n filename=stream.name,\n )\n return in_doc._backend\n\n\ndef test_num_pages_single():\n \"\"\"Test page count for single-page image.\"\"\"\n stream = _make_png_stream(width=100, height=80)\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 1\n\n\ndef test_num_pages_multipage():\n \"\"\"Test page count for multi-page TIFF.\"\"\"\n stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == 5\n\n\ndef test_get_size():\n \"\"\"Test getting page size.\"\"\"\n width, height = 120, 90\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n size = page_backend.get_size()\n assert size.width == width\n assert size.height == height\n\n\ndef test_get_page_image_full():\n \"\"\"Test getting full page image.\"\"\"\n width, height = 
100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image()\n assert img.width == width\n assert img.height == height\n\n\ndef test_get_page_image_scaled():\n \"\"\"Test getting scaled page image.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n img = page_backend.get_page_image(scale=scale)\n assert img.width == round(width * scale)\n assert img.height == round(height * scale)\n\n\ndef test_crop_page_image():\n \"\"\"Test cropping page image.\"\"\"\n width, height = 200, 150\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n # Crop a region from the center\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(cropbox=cropbox)\n assert img.width == 100 # 150 - 50\n assert img.height == 90 # 120 - 30\n\n\ndef test_crop_page_image_scaled():\n \"\"\"Test cropping and scaling page image.\"\"\"\n width, height = 200, 150\n scale = 0.5\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)\n img = page_backend.get_page_image(scale=scale, cropbox=cropbox)\n assert img.width == round(100 * scale) # cropped width * scale\n assert img.height == round(90 * scale) # cropped height * scale\n\n\ndef test_get_bitmap_rects():\n \"\"\"Test getting bitmap rects - should return full page rectangle.\"\"\"\n width, height = 100, 80\n stream = _make_png_stream(width=width, height=height)\n doc_backend 
= _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects())\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width)\n assert bbox.b == float(height)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_bitmap_rects_scaled():\n \"\"\"Test getting bitmap rects with scaling.\"\"\"\n width, height = 100, 80\n scale = 2.0\n stream = _make_png_stream(width=width, height=height)\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n rects = list(page_backend.get_bitmap_rects(scale=scale))\n assert len(rects) == 1\n bbox = rects[0]\n assert bbox.l == 0.0\n assert bbox.t == 0.0\n assert bbox.r == float(width * scale)\n assert bbox.b == float(height * scale)\n assert bbox.coord_origin == CoordOrigin.TOPLEFT\n\n\ndef test_get_text_in_rect():\n \"\"\"Test that get_text_in_rect returns empty string for images (no OCR).\"\"\"\n stream = _make_png_stream()\n doc_backend = _get_backend_from_stream(stream)\n page_backend: _ImagePageBackend = doc_backend.load_page(0)\n\n bbox = BoundingBox(l=10, t=10, r=50, b=50, coord_origin=CoordOrigin.TOPLEFT)\n text = page_backend.get_text_in_rect(bbox)\n assert text == \"\"\n\n\ndef test_multipage_access():\n \"\"\"Test accessing different pages in multi-page image.\"\"\"\n num_pages = 4\n stream = _make_multipage_tiff_stream(num_pages=num_pages, size=(64, 64))\n doc_backend = _get_backend_from_stream(stream)\n assert doc_backend.page_count() == num_pages\n\n # Access each page\n for i in range(num_pages):\n page_backend = doc_backend.load_page(i)\n assert page_backend.is_valid()\n size = page_backend.get_size()\n assert size.width == 64\n assert size.height == 64\n", "framework": "pytest", "test_command": "pytest tests/test_backend_image_native.py::test_crop_page_image_scaled -xvs"}] | {"repo_url": 
"https://github.com/docling-project/docling", "install_cmd": "pip install -e .", "commit_sha": "752f81b3dd451208fb59297ea5ef7917cb4fc891", "frozen_requirements": "frozen_requirements/docling-project_docling.txt"} | {"body_lines": 18, "file_lines": 189, "has_docstring": false, "num_tests": 4} | {"status": "passed", "tests_run": 4} | repo_patch/0020 | file_overlap | |
repo_patch/0015 | fastapi/fastapi | fastapi/_compat/shared.py | is_uploadfile_sequence_annotation | is_uploadfile_sequence_annotation | function | null | import types
import typing
import warnings
from collections import deque
from collections.abc import Mapping, Sequence
from dataclasses import is_dataclass
from typing import (
Annotated,
Any,
TypeGuard,
TypeVar,
Union,
get_args,
get_origin,
)
from fastapi.types import UnionType
from pydantic import BaseModel
from pydantic.version import VERSION as PYDANTIC_VERSION
from starlette.datastructures import UploadFile
_T = TypeVar("_T")
# Copy from Pydantic: pydantic/_internal/_typing_extra.py
WithArgsTypes: tuple[Any, ...] = (
typing._GenericAlias, # type: ignore[attr-defined]
types.GenericAlias,
types.UnionType,
) # pyright: ignore[reportAttributeAccessIssue]
PYDANTIC_VERSION_MINOR_TUPLE = tuple(int(x) for x in PYDANTIC_VERSION.split(".")[:2])
sequence_annotation_to_type = {
Sequence: list,
list: list,
tuple: tuple,
set: set,
frozenset: frozenset,
deque: deque,
}
sequence_types: tuple[type[Any], ...] = tuple(sequence_annotation_to_type.keys())
# Copy of Pydantic: pydantic/_internal/_utils.py with added TypeGuard
def lenient_issubclass(
cls: Any, class_or_tuple: type[_T] | tuple[type[_T], ...] | None
) -> TypeGuard[type[_T]]:
try:
return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type]
except TypeError: # pragma: no cover
if isinstance(cls, WithArgsTypes):
return False
raise # pragma: no cover
def _annotation_is_sequence(annotation: type[Any] | None) -> bool:
if lenient_issubclass(annotation, (str, bytes)):
return False
return lenient_issubclass(annotation, sequence_types)
def field_annotation_is_sequence(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if field_annotation_is_sequence(arg):
return True
return False
return _annotation_is_sequence(annotation) or _annotation_is_sequence(
get_origin(annotation)
)
def value_is_sequence(value: Any) -> bool:
return isinstance(value, sequence_types) and not isinstance(value, (str, bytes))
def _annotation_is_complex(annotation: type[Any] | None) -> bool:
return (
lenient_issubclass(annotation, (BaseModel, Mapping, UploadFile))
or _annotation_is_sequence(annotation)
or is_dataclass(annotation)
)
def field_annotation_is_complex(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
return any(field_annotation_is_complex(arg) for arg in get_args(annotation))
if origin is Annotated:
return field_annotation_is_complex(get_args(annotation)[0])
return (
_annotation_is_complex(annotation)
or _annotation_is_complex(origin)
or hasattr(origin, "__pydantic_core_schema__")
or hasattr(origin, "__get_pydantic_core_schema__")
)
def field_annotation_is_scalar(annotation: Any) -> bool:
# handle Ellipsis here to make tuple[int, ...] work nicely
return annotation is Ellipsis or not field_annotation_is_complex(annotation)
def field_annotation_is_scalar_sequence(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one_scalar_sequence = False
for arg in get_args(annotation):
if field_annotation_is_scalar_sequence(arg):
at_least_one_scalar_sequence = True
continue
elif not field_annotation_is_scalar(arg):
return False
return at_least_one_scalar_sequence
return field_annotation_is_sequence(annotation) and all(
field_annotation_is_scalar(sub_annotation)
for sub_annotation in get_args(annotation)
)
def is_bytes_or_nonable_bytes_annotation(annotation: Any) -> bool:
if lenient_issubclass(annotation, bytes):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if lenient_issubclass(arg, bytes):
return True
return False
def is_uploadfile_or_nonable_uploadfile_annotation(annotation: Any) -> bool:
if lenient_issubclass(annotation, UploadFile):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if lenient_issubclass(arg, UploadFile):
return True
return False
def is_bytes_sequence_annotation(annotation: Any) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_bytes_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_bytes_or_nonable_bytes_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
)
def is_uploadfile_sequence_annotation(annotation: Any) -> bool:
# TODO: Implement this function
def is_pydantic_v1_model_instance(obj: Any) -> bool:
# TODO: remove this function once the required version of Pydantic fully
# removes pydantic.v1
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
from pydantic import v1
except ImportError: # pragma: no cover
return False
return isinstance(obj, v1.BaseModel)
def is_pydantic_v1_model_class(cls: Any) -> bool:
# TODO: remove this function once the required version of Pydantic fully
# removes pydantic.v1
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
from pydantic import v1
except ImportError: # pragma: no cover
return False
return lenient_issubclass(cls, v1.BaseModel)
def annotation_is_pydantic_v1(annotation: Any) -> bool:
if is_pydantic_v1_model_class(annotation):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if is_pydantic_v1_model_class(arg):
return True
if field_annotation_is_sequence(annotation):
for sub_annotation in get_args(annotation):
if annotation_is_pydantic_v1(sub_annotation):
return True
return False | def is_uploadfile_sequence_annotation(annotation: Any) -> bool: | origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_uploadfile_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_uploadfile_or_nonable_uploadfile_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
) | def is_uploadfile_sequence_annotation(annotation: Any) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_uploadfile_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_uploadfile_or_nonable_uploadfile_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
) | [{"test_file": "tests/test_compat.py", "test_function": "test_is_uploadfile_sequence_annotation", "test_content": "from fastapi import FastAPI, UploadFile\nfrom fastapi._compat import (\n Undefined,\n is_uploadfile_sequence_annotation,\n)\nfrom fastapi._compat.shared import is_bytes_sequence_annotation\nfrom fastapi.testclient import TestClient\nfrom pydantic import BaseModel, ConfigDict\nfrom pydantic.fields import FieldInfo\n\n\ndef test_model_field_default_required():\n from fastapi._compat import v2\n\n # For coverage\n field_info = FieldInfo(annotation=str)\n field = v2.ModelField(name=\"foo\", field_info=field_info)\n assert field.default is Undefined\n\n\ndef test_complex():\n app = FastAPI()\n\n @app.post(\"/\")\n def foo(foo: str | list[int]):\n return foo\n\n client = TestClient(app)\n\n response = client.post(\"/\", json=\"bar\")\n assert response.status_code == 200, response.text\n assert response.json() == \"bar\"\n\n response2 = client.post(\"/\", json=[1, 2])\n assert response2.status_code == 200, response2.text\n assert response2.json() == [1, 2]\n\n\ndef test_propagates_pydantic2_model_config():\n app = FastAPI()\n\n class Missing:\n def __bool__(self):\n return False\n\n class EmbeddedModel(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n value: str | Missing = Missing()\n\n class Model(BaseModel):\n model_config = ConfigDict(\n arbitrary_types_allowed=True,\n )\n value: str | Missing = Missing()\n embedded_model: EmbeddedModel = EmbeddedModel()\n\n @app.post(\"/\")\n def foo(req: Model) -> dict[str, str | None]:\n return {\n \"value\": req.value or None,\n \"embedded_value\": req.embedded_model.value or None,\n }\n\n client = TestClient(app)\n\n response = client.post(\"/\", json={})\n assert response.status_code == 200, response.text\n assert response.json() == {\n \"value\": None,\n \"embedded_value\": None,\n }\n\n response2 = client.post(\n \"/\", json={\"value\": \"foo\", \"embedded_model\": {\"value\": \"bar\"}}\n 
)\n assert response2.status_code == 200, response2.text\n assert response2.json() == {\n \"value\": \"foo\",\n \"embedded_value\": \"bar\",\n }\n\n\ndef test_is_bytes_sequence_annotation_union():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of bytes\n # to be read from files and other types, but I'm not even sure it's a good idea\n # to support it as a first class \"feature\"\n assert is_bytes_sequence_annotation(list[str] | list[bytes])\n\n\ndef test_is_uploadfile_sequence_annotation():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of UploadFile\n # and other types, but I'm not even sure it's a good idea to support it as a first\n # class \"feature\"\n assert is_uploadfile_sequence_annotation(list[str] | list[UploadFile])\n\n\ndef test_serialize_sequence_value_with_optional_list():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly.\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_optional_list_pipe_union():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly (with new syntax).\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_none_first_in_union():\n \"\"\"Test that serialize_sequence_value handles Union[None, List[...]] correctly.\"\"\"\n from typing import Union\n\n from fastapi._compat import v2\n\n # Use Union[None, 
list[str]] to ensure None comes first in the union args\n field_info = FieldInfo(annotation=Union[None, list[str]]) # noqa: UP007\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"x\", \"y\"])\n assert result == [\"x\", \"y\"]\n assert isinstance(result, list)\n", "framework": "pytest", "test_command": "pytest tests/test_compat.py::test_is_uploadfile_sequence_annotation -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 12, "file_lines": 215, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0021 | file_overlap | |
repo_patch/0016 | fastapi/fastapi | docs_src/generate_clients/tutorial002_py310.py | get_items | get_items | function | null | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
price: float
class ResponseMessage(BaseModel):
message: str
class User(BaseModel):
username: str
email: str
@app.post("/items/", response_model=ResponseMessage, tags=["items"])
async def create_item(item: Item):
return {"message": "Item received"}
@app.get("/items/", response_model=list[Item], tags=["items"])
async def get_items():
# TODO: Implement this function
@app.post("/users/", response_model=ResponseMessage, tags=["users"])
async def create_user(user: User):
return {"message": "User received"} | async def get_items(): | return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | async def get_items():
return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | [{"test_file": "tests/test_tutorial/test_python_types/test_tutorial005.py", "test_function": "test_get_items", "test_content": "from docs_src.python_types.tutorial005_py310 import get_items\n\n\ndef test_get_items():\n res = get_items(\n \"item_a\",\n \"item_b\",\n \"item_c\",\n \"item_d\",\n \"item_e\",\n )\n assert res == (\"item_a\", \"item_b\", \"item_c\", \"item_d\", \"item_e\")\n", "framework": "pytest", "test_command": "pytest tests/test_tutorial/test_python_types/test_tutorial005.py::test_get_items -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 4, "file_lines": 37, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0022 | clean | |
repo_patch/0017 | fastapi/fastapi | fastapi/_compat/v2.py | serialize_sequence_value | serialize_sequence_value | function | null | import re
import warnings
from collections.abc import Sequence
from copy import copy
from dataclasses import dataclass, is_dataclass
from enum import Enum
from functools import lru_cache
from typing import (
Annotated,
Any,
Literal,
Union,
cast,
get_args,
get_origin,
)
from fastapi._compat import lenient_issubclass, shared
from fastapi.openapi.constants import REF_TEMPLATE
from fastapi.types import IncEx, ModelNameMap, UnionType
from pydantic import BaseModel, ConfigDict, Field, TypeAdapter, create_model
from pydantic import PydanticSchemaGenerationError as PydanticSchemaGenerationError
from pydantic import PydanticUndefinedAnnotation as PydanticUndefinedAnnotation
from pydantic import ValidationError as ValidationError
from pydantic._internal._schema_generation_shared import ( # type: ignore[attr-defined]
GetJsonSchemaHandler as GetJsonSchemaHandler,
)
from pydantic._internal._typing_extra import eval_type_lenient
from pydantic.fields import FieldInfo as FieldInfo
from pydantic.json_schema import GenerateJsonSchema as _GenerateJsonSchema
from pydantic.json_schema import JsonSchemaValue as JsonSchemaValue
from pydantic_core import CoreSchema as CoreSchema
from pydantic_core import PydanticUndefined
from pydantic_core import Url as Url
from pydantic_core.core_schema import (
with_info_plain_validator_function as with_info_plain_validator_function,
)
RequiredParam = PydanticUndefined
Undefined = PydanticUndefined
evaluate_forwardref = eval_type_lenient
class GenerateJsonSchema(_GenerateJsonSchema):
# TODO: remove when this is merged (or equivalent): https://github.com/pydantic/pydantic/pull/12841
# and dropping support for any version of Pydantic before that one (so, in a very long time)
def bytes_schema(self, schema: CoreSchema) -> JsonSchemaValue:
json_schema = {"type": "string", "contentMediaType": "application/octet-stream"}
bytes_mode = (
self._config.ser_json_bytes
if self.mode == "serialization"
else self._config.val_json_bytes
)
if bytes_mode == "base64":
json_schema["contentEncoding"] = "base64"
self.update_with_validations(json_schema, schema, self.ValidationsMapping.bytes)
return json_schema
# TODO: remove when dropping support for Pydantic < v2.12.3
_Attrs = {
"default": ...,
"default_factory": None,
"alias": None,
"alias_priority": None,
"validation_alias": None,
"serialization_alias": None,
"title": None,
"field_title_generator": None,
"description": None,
"examples": None,
"exclude": None,
"exclude_if": None,
"discriminator": None,
"deprecated": None,
"json_schema_extra": None,
"frozen": None,
"validate_default": None,
"repr": True,
"init": None,
"init_var": None,
"kw_only": None,
}
# TODO: remove when dropping support for Pydantic < v2.12.3
def asdict(field_info: FieldInfo) -> dict[str, Any]:
attributes = {}
for attr in _Attrs:
value = getattr(field_info, attr, Undefined)
if value is not Undefined:
attributes[attr] = value
return {
"annotation": field_info.annotation,
"metadata": field_info.metadata,
"attributes": attributes,
}
@dataclass
class ModelField:
field_info: FieldInfo
name: str
mode: Literal["validation", "serialization"] = "validation"
config: ConfigDict | None = None
@property
def alias(self) -> str:
a = self.field_info.alias
return a if a is not None else self.name
@property
def validation_alias(self) -> str | None:
va = self.field_info.validation_alias
if isinstance(va, str) and va:
return va
return None
@property
def serialization_alias(self) -> str | None:
sa = self.field_info.serialization_alias
return sa or None
@property
def default(self) -> Any:
return self.get_default()
def __post_init__(self) -> None:
with warnings.catch_warnings():
# Pydantic >= 2.12.0 warns about field specific metadata that is unused
# (e.g. `TypeAdapter(Annotated[int, Field(alias='b')])`). In some cases, we
# end up building the type adapter from a model field annotation so we
# need to ignore the warning:
if shared.PYDANTIC_VERSION_MINOR_TUPLE >= (2, 12):
from pydantic.warnings import UnsupportedFieldAttributeWarning
warnings.simplefilter(
"ignore", category=UnsupportedFieldAttributeWarning
)
# TODO: remove after setting the min Pydantic to v2.12.3
# that adds asdict(), and use self.field_info.asdict() instead
field_dict = asdict(self.field_info)
annotated_args = (
field_dict["annotation"],
*field_dict["metadata"],
# this FieldInfo needs to be created again so that it doesn't include
# the old field info metadata and only the rest of the attributes
Field(**field_dict["attributes"]),
)
self._type_adapter: TypeAdapter[Any] = TypeAdapter(
Annotated[annotated_args],
config=self.config,
)
def get_default(self) -> Any:
if self.field_info.is_required():
return Undefined
return self.field_info.get_default(call_default_factory=True)
def validate(
self,
value: Any,
values: dict[str, Any] = {}, # noqa: B006
*,
loc: tuple[int | str, ...] = (),
) -> tuple[Any, list[dict[str, Any]]]:
try:
return (
self._type_adapter.validate_python(value, from_attributes=True),
[],
)
except ValidationError as exc:
return None, _regenerate_error_with_loc(
errors=exc.errors(include_url=False), loc_prefix=loc
)
def serialize(
self,
value: Any,
*,
mode: Literal["json", "python"] = "json",
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> Any:
# What calls this code passes a value that already called
# self._type_adapter.validate_python(value)
return self._type_adapter.dump_python(
value,
mode=mode,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
def serialize_json(
self,
value: Any,
*,
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> bytes:
# What calls this code passes a value that already called
# self._type_adapter.validate_python(value)
# This uses Pydantic's dump_json() which serializes directly to JSON
# bytes in one pass (via Rust), avoiding the intermediate Python dict
# step of dump_python(mode="json") + json.dumps().
return self._type_adapter.dump_json(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
def __hash__(self) -> int:
# Each ModelField is unique for our purposes, to allow making a dict from
# ModelField to its JSON Schema.
return id(self)
def _has_computed_fields(field: ModelField) -> bool:
computed_fields = field._type_adapter.core_schema.get("schema", {}).get(
"computed_fields", []
)
return len(computed_fields) > 0
def get_schema_from_model_field(
*,
field: ModelField,
model_name_map: ModelNameMap,
field_mapping: dict[
tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue
],
separate_input_output_schemas: bool = True,
) -> dict[str, Any]:
override_mode: Literal["validation"] | None = (
None
if (separate_input_output_schemas or _has_computed_fields(field))
else "validation"
)
field_alias = (
(field.validation_alias or field.alias)
if field.mode == "validation"
else (field.serialization_alias or field.alias)
)
# This expects that GenerateJsonSchema was already used to generate the definitions
json_schema = field_mapping[(field, override_mode or field.mode)]
if "$ref" not in json_schema:
# TODO remove when deprecating Pydantic v1
# Ref: https://github.com/pydantic/pydantic/blob/d61792cc42c80b13b23e3ffa74bc37ec7c77f7d1/pydantic/schema.py#L207
json_schema["title"] = field.field_info.title or field_alias.title().replace(
"_", " "
)
return json_schema
def get_definitions(
*,
fields: Sequence[ModelField],
model_name_map: ModelNameMap,
separate_input_output_schemas: bool = True,
) -> tuple[
dict[tuple[ModelField, Literal["validation", "serialization"]], JsonSchemaValue],
dict[str, dict[str, Any]],
]:
schema_generator = GenerateJsonSchema(ref_template=REF_TEMPLATE)
validation_fields = [field for field in fields if field.mode == "validation"]
serialization_fields = [field for field in fields if field.mode == "serialization"]
flat_validation_models = get_flat_models_from_fields(
validation_fields, known_models=set()
)
flat_serialization_models = get_flat_models_from_fields(
serialization_fields, known_models=set()
)
flat_validation_model_fields = [
ModelField(
field_info=FieldInfo(annotation=model),
name=model.__name__,
mode="validation",
)
for model in flat_validation_models
]
flat_serialization_model_fields = [
ModelField(
field_info=FieldInfo(annotation=model),
name=model.__name__,
mode="serialization",
)
for model in flat_serialization_models
]
flat_model_fields = flat_validation_model_fields + flat_serialization_model_fields
input_types = {f.field_info.annotation for f in fields}
unique_flat_model_fields = {
f for f in flat_model_fields if f.field_info.annotation not in input_types
}
inputs = [
(
field,
(
field.mode
if (separate_input_output_schemas or _has_computed_fields(field))
else "validation"
),
field._type_adapter.core_schema,
)
for field in list(fields) + list(unique_flat_model_fields)
]
field_mapping, definitions = schema_generator.generate_definitions(inputs=inputs)
for item_def in cast(dict[str, dict[str, Any]], definitions).values():
if "description" in item_def:
item_description = cast(str, item_def["description"]).split("\f")[0]
item_def["description"] = item_description
# definitions: dict[DefsRef, dict[str, Any]]
# but mypy complains about general str in other places that are not declared as
# DefsRef, although DefsRef is just str:
# DefsRef = NewType('DefsRef', str)
# So, a cast to simplify the types here
return field_mapping, cast(dict[str, dict[str, Any]], definitions)
def is_scalar_field(field: ModelField) -> bool:
from fastapi import params
return shared.field_annotation_is_scalar(
field.field_info.annotation
) and not isinstance(field.field_info, params.Body)
def copy_field_info(*, field_info: FieldInfo, annotation: Any) -> FieldInfo:
cls = type(field_info)
merged_field_info = cls.from_annotation(annotation)
new_field_info = copy(field_info)
new_field_info.metadata = merged_field_info.metadata
new_field_info.annotation = merged_field_info.annotation
return new_field_info
def serialize_sequence_value(*, field: ModelField, value: Any) -> Sequence[Any]:
# TODO: Implement this function
def get_missing_field_error(loc: tuple[int | str, ...]) -> dict[str, Any]:
error = ValidationError.from_exception_data(
"Field required", [{"type": "missing", "loc": loc, "input": {}}]
).errors(include_url=False)[0]
error["input"] = None
return error # type: ignore[return-value]
def create_body_model(
*, fields: Sequence[ModelField], model_name: str
) -> type[BaseModel]:
field_params = {f.name: (f.field_info.annotation, f.field_info) for f in fields}
BodyModel: type[BaseModel] = create_model(model_name, **field_params) # type: ignore[call-overload]
return BodyModel
def get_model_fields(model: type[BaseModel]) -> list[ModelField]:
model_fields: list[ModelField] = []
for name, field_info in model.model_fields.items():
type_ = field_info.annotation
if lenient_issubclass(type_, (BaseModel, dict)) or is_dataclass(type_):
model_config = None
else:
model_config = model.model_config
model_fields.append(
ModelField(
field_info=field_info,
name=name,
config=model_config,
)
)
return model_fields
@lru_cache
def get_cached_model_fields(model: type[BaseModel]) -> list[ModelField]:
return get_model_fields(model)
# Duplicate of several schema functions from Pydantic v1 to make them compatible with
# Pydantic v2 and allow mixing the models
TypeModelOrEnum = type["BaseModel"] | type[Enum]
TypeModelSet = set[TypeModelOrEnum]
def normalize_name(name: str) -> str:
return re.sub(r"[^a-zA-Z0-9.\-_]", "_", name)
def get_model_name_map(unique_models: TypeModelSet) -> dict[TypeModelOrEnum, str]:
name_model_map = {}
for model in unique_models:
model_name = normalize_name(model.__name__)
name_model_map[model_name] = model
return {v: k for k, v in name_model_map.items()}
def get_flat_models_from_model(
model: type["BaseModel"], known_models: TypeModelSet | None = None
) -> TypeModelSet:
known_models = known_models or set()
fields = get_model_fields(model)
get_flat_models_from_fields(fields, known_models=known_models)
return known_models
def get_flat_models_from_annotation(
annotation: Any, known_models: TypeModelSet
) -> TypeModelSet:
origin = get_origin(annotation)
if origin is not None:
for arg in get_args(annotation):
if lenient_issubclass(arg, (BaseModel, Enum)):
if arg not in known_models:
known_models.add(arg) # type: ignore[arg-type]
if lenient_issubclass(arg, BaseModel):
get_flat_models_from_model(arg, known_models=known_models)
else:
get_flat_models_from_annotation(arg, known_models=known_models)
return known_models
def get_flat_models_from_field(
field: ModelField, known_models: TypeModelSet
) -> TypeModelSet:
field_type = field.field_info.annotation
if lenient_issubclass(field_type, BaseModel):
if field_type in known_models:
return known_models
known_models.add(field_type)
get_flat_models_from_model(field_type, known_models=known_models)
elif lenient_issubclass(field_type, Enum):
known_models.add(field_type)
else:
get_flat_models_from_annotation(field_type, known_models=known_models)
return known_models
def get_flat_models_from_fields(
fields: Sequence[ModelField], known_models: TypeModelSet
) -> TypeModelSet:
for field in fields:
get_flat_models_from_field(field, known_models=known_models)
return known_models
def _regenerate_error_with_loc(
*, errors: Sequence[Any], loc_prefix: tuple[str | int, ...]
) -> list[dict[str, Any]]:
updated_loc_errors: list[Any] = [
{**err, "loc": loc_prefix + err.get("loc", ())} for err in errors
]
return updated_loc_errors | def serialize_sequence_value(*, field: ModelField, value: Any) -> Sequence[Any]: | origin_type = get_origin(field.field_info.annotation) or field.field_info.annotation
if origin_type is Union or origin_type is UnionType: # Handle optional sequences
union_args = get_args(field.field_info.annotation)
for union_arg in union_args:
if union_arg is type(None):
continue
origin_type = get_origin(union_arg) or union_arg
break
assert issubclass(origin_type, shared.sequence_types) # type: ignore[arg-type]
return shared.sequence_annotation_to_type[origin_type](value) # type: ignore[no-any-return,index] | def serialize_sequence_value(*, field: ModelField, value: Any) -> Sequence[Any]:
origin_type = get_origin(field.field_info.annotation) or field.field_info.annotation
if origin_type is Union or origin_type is UnionType: # Handle optional sequences
union_args = get_args(field.field_info.annotation)
for union_arg in union_args:
if union_arg is type(None):
continue
origin_type = get_origin(union_arg) or union_arg
break
assert issubclass(origin_type, shared.sequence_types) # type: ignore[arg-type]
return shared.sequence_annotation_to_type[origin_type](value) # type: ignore[no-any-return,index] | [{"test_file": "tests/test_compat.py", "test_function": "test_serialize_sequence_value_with_optional_list", "test_content": "from fastapi import FastAPI, UploadFile\nfrom fastapi._compat import (\n Undefined,\n is_uploadfile_sequence_annotation,\n)\nfrom fastapi._compat.shared import is_bytes_sequence_annotation\nfrom fastapi.testclient import TestClient\nfrom pydantic import BaseModel, ConfigDict\nfrom pydantic.fields import FieldInfo\n\n\ndef test_model_field_default_required():\n from fastapi._compat import v2\n\n # For coverage\n field_info = FieldInfo(annotation=str)\n field = v2.ModelField(name=\"foo\", field_info=field_info)\n assert field.default is Undefined\n\n\ndef test_complex():\n app = FastAPI()\n\n @app.post(\"/\")\n def foo(foo: str | list[int]):\n return foo\n\n client = TestClient(app)\n\n response = client.post(\"/\", json=\"bar\")\n assert response.status_code == 200, response.text\n assert response.json() == \"bar\"\n\n response2 = client.post(\"/\", json=[1, 2])\n assert response2.status_code == 200, response2.text\n assert response2.json() == [1, 2]\n\n\ndef test_propagates_pydantic2_model_config():\n app = FastAPI()\n\n class Missing:\n def __bool__(self):\n return False\n\n class EmbeddedModel(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n value: str | Missing = Missing()\n\n class Model(BaseModel):\n model_config = ConfigDict(\n arbitrary_types_allowed=True,\n )\n value: str | Missing = Missing()\n embedded_model: EmbeddedModel = EmbeddedModel()\n\n @app.post(\"/\")\n def foo(req: Model) -> dict[str, str | None]:\n return {\n \"value\": req.value or None,\n \"embedded_value\": req.embedded_model.value or None,\n }\n\n client = TestClient(app)\n\n response = client.post(\"/\", json={})\n assert response.status_code == 200, response.text\n assert response.json() == {\n \"value\": None,\n \"embedded_value\": None,\n 
}\n\n response2 = client.post(\n \"/\", json={\"value\": \"foo\", \"embedded_model\": {\"value\": \"bar\"}}\n )\n assert response2.status_code == 200, response2.text\n assert response2.json() == {\n \"value\": \"foo\",\n \"embedded_value\": \"bar\",\n }\n\n\ndef test_is_bytes_sequence_annotation_union():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of bytes\n # to be read from files and other types, but I'm not even sure it's a good idea\n # to support it as a first class \"feature\"\n assert is_bytes_sequence_annotation(list[str] | list[bytes])\n\n\ndef test_is_uploadfile_sequence_annotation():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of UploadFile\n # and other types, but I'm not even sure it's a good idea to support it as a first\n # class \"feature\"\n assert is_uploadfile_sequence_annotation(list[str] | list[UploadFile])\n\n\ndef test_serialize_sequence_value_with_optional_list():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly.\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_optional_list_pipe_union():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly (with new syntax).\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_none_first_in_union():\n \"\"\"Test that serialize_sequence_value handles Union[None, 
List[...]] correctly.\"\"\"\n from typing import Union\n\n from fastapi._compat import v2\n\n # Use Union[None, list[str]] to ensure None comes first in the union args\n field_info = FieldInfo(annotation=Union[None, list[str]]) # noqa: UP007\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"x\", \"y\"])\n assert result == [\"x\", \"y\"]\n assert isinstance(result, list)\n", "framework": "pytest", "test_command": "pytest tests/test_compat.py::test_serialize_sequence_value_with_optional_list -xvs"}, {"test_file": "tests/test_compat.py", "test_function": "test_serialize_sequence_value_with_optional_list_pipe_union", "test_content": "from fastapi import FastAPI, UploadFile\nfrom fastapi._compat import (\n Undefined,\n is_uploadfile_sequence_annotation,\n)\nfrom fastapi._compat.shared import is_bytes_sequence_annotation\nfrom fastapi.testclient import TestClient\nfrom pydantic import BaseModel, ConfigDict\nfrom pydantic.fields import FieldInfo\n\n\ndef test_model_field_default_required():\n from fastapi._compat import v2\n\n # For coverage\n field_info = FieldInfo(annotation=str)\n field = v2.ModelField(name=\"foo\", field_info=field_info)\n assert field.default is Undefined\n\n\ndef test_complex():\n app = FastAPI()\n\n @app.post(\"/\")\n def foo(foo: str | list[int]):\n return foo\n\n client = TestClient(app)\n\n response = client.post(\"/\", json=\"bar\")\n assert response.status_code == 200, response.text\n assert response.json() == \"bar\"\n\n response2 = client.post(\"/\", json=[1, 2])\n assert response2.status_code == 200, response2.text\n assert response2.json() == [1, 2]\n\n\ndef test_propagates_pydantic2_model_config():\n app = FastAPI()\n\n class Missing:\n def __bool__(self):\n return False\n\n class EmbeddedModel(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n value: str | Missing = Missing()\n\n class Model(BaseModel):\n model_config = ConfigDict(\n 
arbitrary_types_allowed=True,\n )\n value: str | Missing = Missing()\n embedded_model: EmbeddedModel = EmbeddedModel()\n\n @app.post(\"/\")\n def foo(req: Model) -> dict[str, str | None]:\n return {\n \"value\": req.value or None,\n \"embedded_value\": req.embedded_model.value or None,\n }\n\n client = TestClient(app)\n\n response = client.post(\"/\", json={})\n assert response.status_code == 200, response.text\n assert response.json() == {\n \"value\": None,\n \"embedded_value\": None,\n }\n\n response2 = client.post(\n \"/\", json={\"value\": \"foo\", \"embedded_model\": {\"value\": \"bar\"}}\n )\n assert response2.status_code == 200, response2.text\n assert response2.json() == {\n \"value\": \"foo\",\n \"embedded_value\": \"bar\",\n }\n\n\ndef test_is_bytes_sequence_annotation_union():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of bytes\n # to be read from files and other types, but I'm not even sure it's a good idea\n # to support it as a first class \"feature\"\n assert is_bytes_sequence_annotation(list[str] | list[bytes])\n\n\ndef test_is_uploadfile_sequence_annotation():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of UploadFile\n # and other types, but I'm not even sure it's a good idea to support it as a first\n # class \"feature\"\n assert is_uploadfile_sequence_annotation(list[str] | list[UploadFile])\n\n\ndef test_serialize_sequence_value_with_optional_list():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly.\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_optional_list_pipe_union():\n \"\"\"Test that serialize_sequence_value 
handles optional lists correctly (with new syntax).\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_none_first_in_union():\n \"\"\"Test that serialize_sequence_value handles Union[None, List[...]] correctly.\"\"\"\n from typing import Union\n\n from fastapi._compat import v2\n\n # Use Union[None, list[str]] to ensure None comes first in the union args\n field_info = FieldInfo(annotation=Union[None, list[str]]) # noqa: UP007\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"x\", \"y\"])\n assert result == [\"x\", \"y\"]\n assert isinstance(result, list)\n", "framework": "pytest", "test_command": "pytest tests/test_compat.py::test_serialize_sequence_value_with_optional_list_pipe_union -xvs"}, {"test_file": "tests/test_compat.py", "test_function": "test_serialize_sequence_value_with_none_first_in_union", "test_content": "from fastapi import FastAPI, UploadFile\nfrom fastapi._compat import (\n Undefined,\n is_uploadfile_sequence_annotation,\n)\nfrom fastapi._compat.shared import is_bytes_sequence_annotation\nfrom fastapi.testclient import TestClient\nfrom pydantic import BaseModel, ConfigDict\nfrom pydantic.fields import FieldInfo\n\n\ndef test_model_field_default_required():\n from fastapi._compat import v2\n\n # For coverage\n field_info = FieldInfo(annotation=str)\n field = v2.ModelField(name=\"foo\", field_info=field_info)\n assert field.default is Undefined\n\n\ndef test_complex():\n app = FastAPI()\n\n @app.post(\"/\")\n def foo(foo: str | list[int]):\n return foo\n\n client = TestClient(app)\n\n response = client.post(\"/\", json=\"bar\")\n assert response.status_code == 200, 
response.text\n assert response.json() == \"bar\"\n\n response2 = client.post(\"/\", json=[1, 2])\n assert response2.status_code == 200, response2.text\n assert response2.json() == [1, 2]\n\n\ndef test_propagates_pydantic2_model_config():\n app = FastAPI()\n\n class Missing:\n def __bool__(self):\n return False\n\n class EmbeddedModel(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n value: str | Missing = Missing()\n\n class Model(BaseModel):\n model_config = ConfigDict(\n arbitrary_types_allowed=True,\n )\n value: str | Missing = Missing()\n embedded_model: EmbeddedModel = EmbeddedModel()\n\n @app.post(\"/\")\n def foo(req: Model) -> dict[str, str | None]:\n return {\n \"value\": req.value or None,\n \"embedded_value\": req.embedded_model.value or None,\n }\n\n client = TestClient(app)\n\n response = client.post(\"/\", json={})\n assert response.status_code == 200, response.text\n assert response.json() == {\n \"value\": None,\n \"embedded_value\": None,\n }\n\n response2 = client.post(\n \"/\", json={\"value\": \"foo\", \"embedded_model\": {\"value\": \"bar\"}}\n )\n assert response2.status_code == 200, response2.text\n assert response2.json() == {\n \"value\": \"foo\",\n \"embedded_value\": \"bar\",\n }\n\n\ndef test_is_bytes_sequence_annotation_union():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of bytes\n # to be read from files and other types, but I'm not even sure it's a good idea\n # to support it as a first class \"feature\"\n assert is_bytes_sequence_annotation(list[str] | list[bytes])\n\n\ndef test_is_uploadfile_sequence_annotation():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of UploadFile\n # and other types, but I'm not even sure it's a good idea to support it as a first\n # class \"feature\"\n assert is_uploadfile_sequence_annotation(list[str] | list[UploadFile])\n\n\ndef test_serialize_sequence_value_with_optional_list():\n 
\"\"\"Test that serialize_sequence_value handles optional lists correctly.\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_optional_list_pipe_union():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly (with new syntax).\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_none_first_in_union():\n \"\"\"Test that serialize_sequence_value handles Union[None, List[...]] correctly.\"\"\"\n from typing import Union\n\n from fastapi._compat import v2\n\n # Use Union[None, list[str]] to ensure None comes first in the union args\n field_info = FieldInfo(annotation=Union[None, list[str]]) # noqa: UP007\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"x\", \"y\"])\n assert result == [\"x\", \"y\"]\n assert isinstance(result, list)\n", "framework": "pytest", "test_command": "pytest tests/test_compat.py::test_serialize_sequence_value_with_none_first_in_union -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 10, "file_lines": 481, "has_docstring": false, "num_tests": 3} | {"status": "passed", "tests_run": 3} | repo_patch/0023 | file_overlap | |
repo_patch/0018 | fastapi/fastapi | docs_src/generate_clients/tutorial001_py310.py | get_items | get_items | function | null | from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: str
price: float
class ResponseMessage(BaseModel):
message: str
@app.post("/items/", response_model=ResponseMessage)
async def create_item(item: Item):
return {"message": "item received"}
@app.get("/items/", response_model=list[Item])
async def get_items():
# TODO: Implement this function | async def get_items(): | return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | async def get_items():
return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | [{"test_file": "tests/test_tutorial/test_python_types/test_tutorial005.py", "test_function": "test_get_items", "test_content": "from docs_src.python_types.tutorial005_py310 import get_items\n\n\ndef test_get_items():\n res = get_items(\n \"item_a\",\n \"item_b\",\n \"item_c\",\n \"item_d\",\n \"item_e\",\n )\n assert res == (\"item_a\", \"item_b\", \"item_c\", \"item_d\", \"item_e\")\n", "framework": "pytest", "test_command": "pytest tests/test_tutorial/test_python_types/test_tutorial005.py::test_get_items -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 4, "file_lines": 27, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0024 | clean | |
repo_patch/0019 | fastapi/fastapi | docs_src/generate_clients/tutorial003_py310.py | get_items | get_items | function | null | from fastapi import FastAPI
from fastapi.routing import APIRoute
from pydantic import BaseModel
def custom_generate_unique_id(route: APIRoute):
return f"{route.tags[0]}-{route.name}"
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
class Item(BaseModel):
name: str
price: float
class ResponseMessage(BaseModel):
message: str
class User(BaseModel):
username: str
email: str
@app.post("/items/", response_model=ResponseMessage, tags=["items"])
async def create_item(item: Item):
return {"message": "Item received"}
@app.get("/items/", response_model=list[Item], tags=["items"])
async def get_items():
# TODO: Implement this function
@app.post("/users/", response_model=ResponseMessage, tags=["users"])
async def create_user(user: User):
return {"message": "User received"} | async def get_items(): | return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | async def get_items():
return [
{"name": "Plumbus", "price": 3},
{"name": "Portal Gun", "price": 9001},
] | [{"test_file": "tests/test_tutorial/test_python_types/test_tutorial005.py", "test_function": "test_get_items", "test_content": "from docs_src.python_types.tutorial005_py310 import get_items\n\n\ndef test_get_items():\n res = get_items(\n \"item_a\",\n \"item_b\",\n \"item_c\",\n \"item_d\",\n \"item_e\",\n )\n assert res == (\"item_a\", \"item_b\", \"item_c\", \"item_d\", \"item_e\")\n", "framework": "pytest", "test_command": "pytest tests/test_tutorial/test_python_types/test_tutorial005.py::test_get_items -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 4, "file_lines": 43, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0025 | clean | |
repo_patch/0020 | fastapi/fastapi | fastapi/_compat/shared.py | is_bytes_sequence_annotation | is_bytes_sequence_annotation | function | null | import types
import typing
import warnings
from collections import deque
from collections.abc import Mapping, Sequence
from dataclasses import is_dataclass
from typing import (
Annotated,
Any,
TypeGuard,
TypeVar,
Union,
get_args,
get_origin,
)
from fastapi.types import UnionType
from pydantic import BaseModel
from pydantic.version import VERSION as PYDANTIC_VERSION
from starlette.datastructures import UploadFile
_T = TypeVar("_T")
# Copy from Pydantic: pydantic/_internal/_typing_extra.py
WithArgsTypes: tuple[Any, ...] = (
typing._GenericAlias, # type: ignore[attr-defined]
types.GenericAlias,
types.UnionType,
) # pyright: ignore[reportAttributeAccessIssue]
PYDANTIC_VERSION_MINOR_TUPLE = tuple(int(x) for x in PYDANTIC_VERSION.split(".")[:2])
sequence_annotation_to_type = {
Sequence: list,
list: list,
tuple: tuple,
set: set,
frozenset: frozenset,
deque: deque,
}
sequence_types: tuple[type[Any], ...] = tuple(sequence_annotation_to_type.keys())
# Copy of Pydantic: pydantic/_internal/_utils.py with added TypeGuard
def lenient_issubclass(
cls: Any, class_or_tuple: type[_T] | tuple[type[_T], ...] | None
) -> TypeGuard[type[_T]]:
try:
return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type]
except TypeError: # pragma: no cover
if isinstance(cls, WithArgsTypes):
return False
raise # pragma: no cover
def _annotation_is_sequence(annotation: type[Any] | None) -> bool:
if lenient_issubclass(annotation, (str, bytes)):
return False
return lenient_issubclass(annotation, sequence_types)
def field_annotation_is_sequence(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if field_annotation_is_sequence(arg):
return True
return False
return _annotation_is_sequence(annotation) or _annotation_is_sequence(
get_origin(annotation)
)
def value_is_sequence(value: Any) -> bool:
return isinstance(value, sequence_types) and not isinstance(value, (str, bytes))
def _annotation_is_complex(annotation: type[Any] | None) -> bool:
return (
lenient_issubclass(annotation, (BaseModel, Mapping, UploadFile))
or _annotation_is_sequence(annotation)
or is_dataclass(annotation)
)
def field_annotation_is_complex(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
return any(field_annotation_is_complex(arg) for arg in get_args(annotation))
if origin is Annotated:
return field_annotation_is_complex(get_args(annotation)[0])
return (
_annotation_is_complex(annotation)
or _annotation_is_complex(origin)
or hasattr(origin, "__pydantic_core_schema__")
or hasattr(origin, "__get_pydantic_core_schema__")
)
def field_annotation_is_scalar(annotation: Any) -> bool:
# handle Ellipsis here to make tuple[int, ...] work nicely
return annotation is Ellipsis or not field_annotation_is_complex(annotation)
def field_annotation_is_scalar_sequence(annotation: type[Any] | None) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one_scalar_sequence = False
for arg in get_args(annotation):
if field_annotation_is_scalar_sequence(arg):
at_least_one_scalar_sequence = True
continue
elif not field_annotation_is_scalar(arg):
return False
return at_least_one_scalar_sequence
return field_annotation_is_sequence(annotation) and all(
field_annotation_is_scalar(sub_annotation)
for sub_annotation in get_args(annotation)
)
def is_bytes_or_nonable_bytes_annotation(annotation: Any) -> bool:
if lenient_issubclass(annotation, bytes):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if lenient_issubclass(arg, bytes):
return True
return False
def is_uploadfile_or_nonable_uploadfile_annotation(annotation: Any) -> bool:
if lenient_issubclass(annotation, UploadFile):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if lenient_issubclass(arg, UploadFile):
return True
return False
def is_bytes_sequence_annotation(annotation: Any) -> bool:
# TODO: Implement this function
def is_uploadfile_sequence_annotation(annotation: Any) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_uploadfile_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_uploadfile_or_nonable_uploadfile_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
)
def is_pydantic_v1_model_instance(obj: Any) -> bool:
# TODO: remove this function once the required version of Pydantic fully
# removes pydantic.v1
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
from pydantic import v1
except ImportError: # pragma: no cover
return False
return isinstance(obj, v1.BaseModel)
def is_pydantic_v1_model_class(cls: Any) -> bool:
# TODO: remove this function once the required version of Pydantic fully
# removes pydantic.v1
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
from pydantic import v1
except ImportError: # pragma: no cover
return False
return lenient_issubclass(cls, v1.BaseModel)
def annotation_is_pydantic_v1(annotation: Any) -> bool:
if is_pydantic_v1_model_class(annotation):
return True
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
for arg in get_args(annotation):
if is_pydantic_v1_model_class(arg):
return True
if field_annotation_is_sequence(annotation):
for sub_annotation in get_args(annotation):
if annotation_is_pydantic_v1(sub_annotation):
return True
return False | def is_bytes_sequence_annotation(annotation: Any) -> bool: | origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_bytes_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_bytes_or_nonable_bytes_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
) | def is_bytes_sequence_annotation(annotation: Any) -> bool:
origin = get_origin(annotation)
if origin is Union or origin is UnionType:
at_least_one = False
for arg in get_args(annotation):
if is_bytes_sequence_annotation(arg):
at_least_one = True
continue
return at_least_one
return field_annotation_is_sequence(annotation) and all(
is_bytes_or_nonable_bytes_annotation(sub_annotation)
for sub_annotation in get_args(annotation)
) | [{"test_file": "tests/test_compat.py", "test_function": "test_is_bytes_sequence_annotation_union", "test_content": "from fastapi import FastAPI, UploadFile\nfrom fastapi._compat import (\n Undefined,\n is_uploadfile_sequence_annotation,\n)\nfrom fastapi._compat.shared import is_bytes_sequence_annotation\nfrom fastapi.testclient import TestClient\nfrom pydantic import BaseModel, ConfigDict\nfrom pydantic.fields import FieldInfo\n\n\ndef test_model_field_default_required():\n from fastapi._compat import v2\n\n # For coverage\n field_info = FieldInfo(annotation=str)\n field = v2.ModelField(name=\"foo\", field_info=field_info)\n assert field.default is Undefined\n\n\ndef test_complex():\n app = FastAPI()\n\n @app.post(\"/\")\n def foo(foo: str | list[int]):\n return foo\n\n client = TestClient(app)\n\n response = client.post(\"/\", json=\"bar\")\n assert response.status_code == 200, response.text\n assert response.json() == \"bar\"\n\n response2 = client.post(\"/\", json=[1, 2])\n assert response2.status_code == 200, response2.text\n assert response2.json() == [1, 2]\n\n\ndef test_propagates_pydantic2_model_config():\n app = FastAPI()\n\n class Missing:\n def __bool__(self):\n return False\n\n class EmbeddedModel(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n value: str | Missing = Missing()\n\n class Model(BaseModel):\n model_config = ConfigDict(\n arbitrary_types_allowed=True,\n )\n value: str | Missing = Missing()\n embedded_model: EmbeddedModel = EmbeddedModel()\n\n @app.post(\"/\")\n def foo(req: Model) -> dict[str, str | None]:\n return {\n \"value\": req.value or None,\n \"embedded_value\": req.embedded_model.value or None,\n }\n\n client = TestClient(app)\n\n response = client.post(\"/\", json={})\n assert response.status_code == 200, response.text\n assert response.json() == {\n \"value\": None,\n \"embedded_value\": None,\n }\n\n response2 = client.post(\n \"/\", json={\"value\": \"foo\", \"embedded_model\": {\"value\": 
\"bar\"}}\n )\n assert response2.status_code == 200, response2.text\n assert response2.json() == {\n \"value\": \"foo\",\n \"embedded_value\": \"bar\",\n }\n\n\ndef test_is_bytes_sequence_annotation_union():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of bytes\n # to be read from files and other types, but I'm not even sure it's a good idea\n # to support it as a first class \"feature\"\n assert is_bytes_sequence_annotation(list[str] | list[bytes])\n\n\ndef test_is_uploadfile_sequence_annotation():\n # For coverage\n # TODO: in theory this would allow declaring types that could be lists of UploadFile\n # and other types, but I'm not even sure it's a good idea to support it as a first\n # class \"feature\"\n assert is_uploadfile_sequence_annotation(list[str] | list[UploadFile])\n\n\ndef test_serialize_sequence_value_with_optional_list():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly.\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_optional_list_pipe_union():\n \"\"\"Test that serialize_sequence_value handles optional lists correctly (with new syntax).\"\"\"\n from fastapi._compat import v2\n\n field_info = FieldInfo(annotation=list[str] | None)\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"a\", \"b\", \"c\"])\n assert result == [\"a\", \"b\", \"c\"]\n assert isinstance(result, list)\n\n\ndef test_serialize_sequence_value_with_none_first_in_union():\n \"\"\"Test that serialize_sequence_value handles Union[None, List[...]] correctly.\"\"\"\n from typing import Union\n\n from fastapi._compat import v2\n\n # Use 
Union[None, list[str]] to ensure None comes first in the union args\n field_info = FieldInfo(annotation=Union[None, list[str]]) # noqa: UP007\n field = v2.ModelField(name=\"items\", field_info=field_info)\n result = v2.serialize_sequence_value(field=field, value=[\"x\", \"y\"])\n assert result == [\"x\", \"y\"]\n assert isinstance(result, list)\n", "framework": "pytest", "test_command": "pytest tests/test_compat.py::test_is_bytes_sequence_annotation_union -xvs"}] | {"repo_url": "https://github.com/fastapi/fastapi", "install_cmd": "pip install -e .", "commit_sha": "7a03018d6a880651d4fc2b5c79419eb233d7aee5", "frozen_requirements": "frozen_requirements/fastapi_fastapi.txt"} | {"body_lines": 12, "file_lines": 215, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0026 | file_overlap | |
repo_patch/0021 | hiyouga/LlamaFactory | src/llamafactory/v1/plugins/model_plugins/peft.py | get_lora_model | get_lora_model | function | null | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Literal, TypedDict, Union
import torch
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from ...config import InputArgument, get_args
from ...core.model_engine import ModelEngine
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import HFModel
logger = logging.get_logger(__name__)
class LoraConfigDict(TypedDict, total=False):
name: Literal["lora"]
"""Plugin name."""
r: int
"""Lora rank."""
lora_alpha: int
"""Lora alpha."""
lora_dropout: float
"""Lora dropout."""
target_modules: Union[list[str], str]
"""Target modules."""
use_rslora: bool
"""Use RS-LoRA."""
use_dora: bool
"""Use DoRA."""
modules_to_save: list[str]
"""Modules to save."""
adapter_name_or_path: Union[list[str], str]
"""Path to the adapter(s)."""
export_dir: str
"""Path to the export directory."""
export_size: int
"""Shard size for the export model."""
export_hub_model_id: str
"""Hub model ID for the export model."""
infer_dtype: Literal["auto", "float16", "float32", "bfloat16"]
"""Inference data type for the export model."""
export_legacy_format: bool
"""Use legacy format for the export model."""
class FreezeConfigDict(TypedDict, total=False):
name: Literal["freeze"]
"""Plugin name."""
freeze_trainable_layers: int
"""Freeze trainable layers."""
freeze_trainable_modules: Union[list[str], str]
"""Freeze trainable modules."""
freeze_extra_modules: list[str]
"""Freeze extra modules."""
cast_trainable_params_to_fp32: bool
"""Cast trainable params to fp32."""
class PeftPlugin(BasePlugin):
def __call__(self, model: HFModel, config: dict, is_train: bool) -> HFModel:
return super().__call__(model, config, is_train)
def _find_all_linear_modules(model: HFModel) -> list[str]:
r"""Find all available modules to apply LoRA."""
forbidden_modules = {"lm_head", "output_layer", "output"}
module_names = set()
for name, module in model.named_modules():
if any(forbidden_module in name for forbidden_module in forbidden_modules):
continue
if "Linear" in module.__class__.__name__ and "Embedding" not in module.__class__.__name__:
module_names.add(name.split(".")[-1])
return list(module_names)
def merge_adapters(model: HFModel, adapter_name_or_path: Union[list[str], str]) -> HFModel:
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
for adapter_path in adapter_name_or_path:
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
logger.info_rank0(f"Merged adapter from {adapter_path}")
return model
def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
r"""Loads adapter(s) into the model.
Determine adapter usage based on mode:
- Training: Load the single adapter for continued training.
- Inference: Merge all adapters to clean up the model.
- Unmergeable: Keep the single adapter active without merging.
"""
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
# TODO
# Adapters fix for deepspeed and quant
# Adapters fix for vision
if is_train and len(adapter_name_or_path) > 1:
raise ValueError(
"When `adapter_name_or_path` is provided for training, only a single LoRA adapter is supported. "
"Training will continue on the specified adapter. "
"Please merge multiple adapters before starting a new LoRA adapter."
)
if is_train:
adapter_to_merge = []
adapter_to_resume = adapter_name_or_path[0]
else:
adapter_to_merge = adapter_name_or_path
adapter_to_resume = None
if adapter_to_merge:
model = merge_adapters(model, adapter_to_merge)
if adapter_to_resume is not None:
model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_train)
if is_train:
logger.info_rank0(
f"Resuming training from existing LoRA adapter at {adapter_to_resume}. "
"LoRA hyperparameters will be loaded from the adapter itself; "
"the current LoRA configuration will be ignored. "
"Merge the adapter into the base model before training if you want to start a new adapter."
)
return model
@PeftPlugin("lora").register()
def get_lora_model(model: HFModel, config: LoraConfigDict, is_train: bool = False) -> HFModel:
# TODO: Implement this function
@PeftPlugin("freeze").register()
def get_freeze_model(model: HFModel, config: FreezeConfigDict, is_train: bool = False) -> HFModel:
    """Apply freeze (partial-parameter) tuning to ``model``.

    Only the selected transformer layers/modules keep ``requires_grad=True``;
    every other parameter is frozen. Returns the same model object.

    Args:
        model: The HF model to configure.
        config: Freeze-tuning options (``freeze_trainable_layers``,
            ``freeze_trainable_modules``, ``freeze_extra_modules``,
            ``cast_trainable_params_to_fp32``).
        is_train: When False the model is returned untouched.
    """
    logger.info_rank0("Fine-tuning method: Freeze")
    if not is_train:
        return model

    freeze_trainable_layers = config.get("freeze_trainable_layers", 2)
    freeze_trainable_modules = config.get("freeze_trainable_modules", ["all"])
    freeze_extra_modules = config.get("freeze_extra_modules", [])
    cast_trainable_params_to_fp32 = config.get("cast_trainable_params_to_fp32", True)

    # Accept comma-separated strings as well as lists.
    if isinstance(freeze_trainable_modules, str):
        freeze_trainable_modules = [part.strip() for part in freeze_trainable_modules.split(",")]

    if isinstance(freeze_extra_modules, str):
        freeze_extra_modules = [part.strip() for part in freeze_extra_modules.split(",")]

    # Layer count attribute differs across architectures; try the common names.
    num_layers = (
        getattr(model.config, "num_hidden_layers", None)
        or getattr(model.config, "num_layers", None)
        or getattr(model.config, "n_layer", None)
    )
    if not num_layers:
        raise ValueError("Current model does not support freeze tuning.")

    # Positive count -> train the last N layers; non-positive -> train the first |N|.
    if freeze_trainable_layers > 0:
        trainable_layer_ids = range(max(0, num_layers - freeze_trainable_layers), num_layers)
    else:
        trainable_layer_ids = range(min(-freeze_trainable_layers, num_layers))

    # Split parameter names into per-layer ("hidden") modules and the rest
    # (embeddings, lm_head, ...). Layer modules are detected from the first
    # two layer indices appearing in the dotted parameter path.
    hidden_modules: set = set()
    non_hidden_modules: set = set()
    for name, _ in model.named_parameters():
        if ".0." in name:
            hidden_modules.add(name.split(".0.")[-1].split(".")[0])
        elif ".1." in name:
            hidden_modules.add(name.split(".1.")[-1].split(".")[0])

        if re.search(r"\.\d+\.", name) is None:
            non_hidden_modules.add(name.split(".")[-2])

    # Substring patterns that mark a parameter as trainable.
    trainable_layers = []
    for module_name in freeze_trainable_modules:
        if module_name == "all":
            trainable_layers.extend(f".{idx:d}." for idx in trainable_layer_ids)
        elif module_name in hidden_modules:
            trainable_layers.extend(f".{idx:d}.{module_name}" for idx in trainable_layer_ids)
        else:
            raise ValueError(f"Module {module_name} not found in hidden modules: {hidden_modules}")

    for module_name in freeze_extra_modules:
        if module_name not in non_hidden_modules:
            raise ValueError(f"Module {module_name} not found in non-hidden modules: {non_hidden_modules}")
        trainable_layers.append(module_name)

    # TODO: multi-modal special handling.
    # Never unfreeze quantization bookkeeping tensors.
    forbidden_modules = {"quant_state", "quantization_weight", "qweight", "qzeros", "scales"}
    for name, param in model.named_parameters():
        is_trainable = any(pattern in name for pattern in trainable_layers) and not any(
            forbidden in name for forbidden in forbidden_modules
        )
        param.requires_grad_(is_trainable)
        if is_trainable and cast_trainable_params_to_fp32:
            param.data = param.data.to(torch.float32)  # Cast to fp32 for stability

    logger.info_rank0(f"Set trainable layers: {trainable_layers}")

    # Report trainable/total parameter counts for verification.
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    all_params = sum(p.numel() for p in model.parameters())
    logger.info_rank0(
        f"trainable params: {trainable_params} || all params: {all_params} || trainable%: {100 * trainable_params / all_params:.4f}"
    )
    return model
def merge_and_export_model(args: InputArgument = None):
    """Merge LoRA adapter(s) into the base model and export the result.

    Loads the model via ``ModelEngine`` (which applies the adapters), optionally
    converts its dtype, saves model + tokenizer to ``export_dir``, and pushes to
    the Hub when ``export_hub_model_id`` is set.

    Raises:
        ValueError: If the peft/export configuration is missing or not LoRA.
    """
    model_args, _, _, _ = get_args(args)
    export_config = model_args.peft_config
    if export_config is None:
        raise ValueError("Please specify peft_config to merge and export model.")

    export_dir = export_config.get("export_dir")
    if export_dir is None:
        raise ValueError("Please specify export_dir.")

    export_size = export_config.get("export_size", 5)
    export_hub_model_id = export_config.get("export_hub_model_id")
    infer_dtype = export_config.get("infer_dtype", "auto")
    export_legacy_format = export_config.get("export_legacy_format", False)

    # Only LoRA adapters can be merged into the base weights.
    if export_config.get("name") != "lora":
        raise ValueError("Currently merge and export model function is only supported for lora.")

    adapters = export_config.get("adapter_name_or_path")
    if adapters is None:
        raise ValueError("Please set adapter_name_or_path to merge adapters into base model.")

    logger.info_rank0("Loading model for export...")
    model_engine = ModelEngine(model_args, is_train=False)
    model = model_engine.model
    tokenizer = model_engine.processor

    # Dtype handling: "auto" upgrades fp32 -> bf16 when the GPU supports it,
    # otherwise convert to the explicitly requested dtype.
    if infer_dtype == "auto":
        if model.config.torch_dtype == torch.float32 and torch.cuda.is_bf16_supported():
            model = model.to(torch.bfloat16)
            logger.info_rank0("Converted model to bfloat16.")
    else:
        model = model.to(getattr(torch, infer_dtype))
        logger.info_rank0(f"Converted model to {infer_dtype}.")

    logger.info_rank0(f"Exporting model to {export_dir}...")
    model.save_pretrained(
        export_dir,
        max_shard_size=f"{export_size}GB",
        safe_serialization=not export_legacy_format,
    )

    if tokenizer is not None:
        try:
            # Left padding is the convention for inference-time exports.
            if hasattr(tokenizer, "padding_side"):
                tokenizer.padding_side = "left"
            tokenizer.save_pretrained(export_dir)
        except Exception as e:
            logger.warning(f"Failed to save tokenizer: {e}")

    if export_hub_model_id:
        logger.info_rank0(f"Pushing to hub: {export_hub_model_id}...")
        model.push_to_hub(export_hub_model_id)
        if tokenizer is not None:
            tokenizer.push_to_hub(export_hub_model_id)

    logger.info_rank0("Model exported successfully.")
if adapter_name_or_path:
return load_adapter(model, adapter_name_or_path, is_train)
logger.info_rank0("Fine-tuning method: LoRA")
target_modules = config.get("target_modules", "all")
# Handle target modules
if target_modules == "all":
target_modules = _find_all_linear_modules(model)
elif isinstance(target_modules, str):
target_modules = [target_modules]
logger.info_rank0(f"LoRA target modules: {target_modules}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=not is_train,
r=config.get("r", 8),
lora_alpha=config.get("lora_alpha", 16),
lora_dropout=config.get("lora_dropout", 0.05),
use_rslora=config.get("use_rslora", False),
use_dora=config.get("use_dora", False),
target_modules=target_modules,
modules_to_save=config.get("modules_to_save", None),
)
model = get_peft_model(model, peft_config)
if is_train:
model.print_trainable_parameters()
return model | def get_lora_model(model: HFModel, config: LoraConfigDict, is_train: bool = False) -> HFModel:
adapter_name_or_path = config.get("adapter_name_or_path")
if adapter_name_or_path:
return load_adapter(model, adapter_name_or_path, is_train)
logger.info_rank0("Fine-tuning method: LoRA")
target_modules = config.get("target_modules", "all")
# Handle target modules
if target_modules == "all":
target_modules = _find_all_linear_modules(model)
elif isinstance(target_modules, str):
target_modules = [target_modules]
logger.info_rank0(f"LoRA target modules: {target_modules}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=not is_train,
r=config.get("r", 8),
lora_alpha=config.get("lora_alpha", 16),
lora_dropout=config.get("lora_dropout", 0.05),
use_rslora=config.get("use_rslora", False),
use_dora=config.get("use_dora", False),
target_modules=target_modules,
modules_to_save=config.get("modules_to_save", None),
)
model = get_peft_model(model, peft_config)
if is_train:
model.print_trainable_parameters()
return model | [{"test_file": "tests_v1/plugins/model_plugins/test_peft.py", "test_function": "test_get_lora_model", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom peft import LoraConfig, PeftModel, get_peft_model\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom llamafactory.v1.plugins.model_plugins import peft as peft_module\nfrom llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model\n\n\nTINY_MODEL = \"llamafactory/tiny-random-qwen3\"\n\n\n@pytest.fixture(scope=\"module\")\ndef model_path():\n return TINY_MODEL\n\n\n@pytest.fixture(scope=\"function\")\ndef model(model_path):\n return AutoModelForCausalLM.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef tokenizer(model_path):\n return AutoTokenizer.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef adapter_path(tmp_path):\n # Create a dummy adapter\n lora_config = LoraConfig(\n r=8,\n lora_alpha=16,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)\n peft_model = get_peft_model(base_model, lora_config)\n save_path = tmp_path / \"test_adapter\"\n peft_model.save_pretrained(save_path)\n return str(save_path)\n\n\ndef test_find_all_linear_modules(model):\n \"\"\"Verify linear modules are discoverable and include q_proj / 
v_proj for tiny-random-qwen3.\"\"\"\n modules = peft_module._find_all_linear_modules(model)\n expected_subset = {\"q_proj\", \"v_proj\"}\n assert expected_subset.issubset(set(modules))\n\n\ndef test_get_lora_model(model):\n \"\"\"Verify a PeftModel is returned and LoRA config takes effect.\"\"\"\n config = {\"name\": \"lora\", \"r\": 8, \"target_modules\": \"all\", \"lora_alpha\": 16}\n model = peft_module.get_lora_model(model, config, is_train=True)\n assert isinstance(model, PeftModel)\n assert model.peft_config[\"default\"].r == 8\n assert \"q_proj\" in model.peft_config[\"default\"].target_modules\n\n\ndef test_get_freeze_model_layers(model):\n \"\"\"Verify layer-wise freezing: only the last layer stays trainable.\"\"\"\n # Freeze all but last layer\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"all\"}\n\n # Ensure we start with something known\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n assert num_layers > 0\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n elif \"layers.0\" in name and num_layers > 1:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_get_freeze_model_modules(model):\n \"\"\"Verify module-wise freezing: only last-layer self_attn is trainable.\"\"\"\n # Freeze specific modules (e.g. 
only self_attn)\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"self_attn\"}\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name and \"self_attn\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n else:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_load_adapter_single_for_inference(model, adapter_path):\n \"\"\"Verify single adapter is merged+unloaded in inference mode.\"\"\"\n # Test loading single adapter for inference (merge and unload)\n model_result = peft_module.load_adapter(model, adapter_path, is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_resume_train(model, adapter_path):\n \"\"\"Verify training mode returns a trainable PeftModel.\"\"\"\n # Test loading for training\n model_result = peft_module.load_adapter(model, adapter_path, is_train=True)\n assert isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_train_multiple_disallowed(model, adapter_path):\n \"\"\"Verify multiple adapters are rejected in training mode.\"\"\"\n with pytest.raises(ValueError, match=\"only a single LoRA adapter\"):\n peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)\n\n\ndef test_load_adapter_infer_multiple_merges(model, adapter_path):\n \"\"\"Verify multiple adapters are merged in inference mode.\"\"\"\n # Test merging multiple adapters\n model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_merge_and_export_model(tmp_path, adapter_path):\n \"\"\"Verify merge_and_export_model produces export artifacts.\"\"\"\n export_dir = tmp_path / \"export\"\n\n args_dict = {\n \"model\": TINY_MODEL,\n \"peft_config\": {\n \"name\": \"lora\",\n 
\"adapter_name_or_path\": adapter_path,\n \"export_dir\": str(export_dir),\n \"export_size\": 1,\n \"infer_dtype\": \"float16\",\n },\n }\n\n merge_and_export_model(args_dict)\n\n assert export_dir.exists()\n assert (export_dir / \"config.json\").exists()\n assert (export_dir / \"model.safetensors\").exists()\n assert (export_dir / \"tokenizer_config.json\").exists()\n", "framework": "pytest", "test_command": "pytest tests_v1/plugins/model_plugins/test_peft.py::test_get_lora_model -xvs"}] | {"repo_url": "https://github.com/hiyouga/LlamaFactory", "install_cmd": "pip install -e .", "commit_sha": "c0245c43fc1fbb87ed6b2f2d28bdcceed5103946", "frozen_requirements": "frozen_requirements/hiyouga_LlamaFactory.txt"} | {"body_lines": 26, "file_lines": 344, "has_docstring": false, "num_tests": 1} | {"status": "passed", "tests_run": 1} | repo_patch/0029 | file_overlap | |
repo_patch/0022 | hiyouga/LlamaFactory | src/llamafactory/v1/core/utils/rendering.py | render_messages | Renderer.render_messages | method | Renderer | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rendering utils.
How to use:
renderer = Renderer(template, processor)
renderer.render_messages(messages: list[Message], tools: str | None) -> ModelInputs
renderer.parse_message(text: str) -> Message
renderer.process_samples(samples: list[Sample]) -> list[ModelInput]
"""
import numpy as np
from ...utils.constants import IGNORE_INDEX
from ...utils.helper import get_tokenizer
from ...utils.types import Message, ModelInput, Processor, Sample
def render_chatml_messages(
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
) -> ModelInput:
"""Apply chatml template to messages and convert them to model input.
See https://huggingface.co/spaces/huggingfacejs/chat-template-playground?modelId=Qwen/Qwen2-7B-Instruct
"""
tokenizer = get_tokenizer(processor)
input_ids, labels, loss_weights = [], [], []
for message in messages:
temp_str = "<|im_start|>" + message["role"] + "\n"
for content in message["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 1.0 if message["role"] == "assistant" else 0.0)
temp_ids = tokenizer.encode(temp_str, add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([temp_weight] * len(temp_ids))
if temp_weight > 1e-6:
labels.extend(temp_ids)
else:
labels.extend([IGNORE_INDEX] * len(temp_ids))
if is_generate:
temp_ids = tokenizer.encode("<|im_start|>assistant\n", add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([0.0] * len(temp_ids))
labels.extend([IGNORE_INDEX] * len(temp_ids))
return ModelInput(
input_ids=input_ids,
attention_mask=[1] * len(input_ids),
labels=labels,
loss_weights=loss_weights,
)
def parse_chatml_message(generated_text: str) -> Message:
"""Parse a message in ChatML format.
Args:
generated_text (str): The generated text in ChatML format.
Returns:
Message: The parsed message.
"""
return Message(role="assistant", content=[{"type": "text", "value": generated_text}])
class Renderer:
def __init__(self, template: str, processor: Processor):
self.template = template
self.processor = processor
def render_messages(
self, messages: list[Message], tools: str | None = None, is_generate: bool = False
) -> ModelInput:
"""Apply template to messages and convert them to model input.
Args:
messages (list[Message]): The messages to render.
tools (str | None, optional): The tools to use. Defaults to None.
is_generate (bool, optional): Whether to render for generation. Defaults to False.
Returns:
ModelInput: The rendered model input.
"""
# TODO: Implement this function
def parse_message(self, generated_text: str) -> Message:
"""Parse a message in the template format.
Args:
generated_text (str): The generated text in the template format.
Returns:
Message: The parsed message.
"""
if self.template == "chatml":
return parse_chatml_message(generated_text)
else:
from ...plugins.model_plugins.rendering import RenderingPlugin
return RenderingPlugin(self.template).parse_message(generated_text)
def process_samples(self, samples: list[Sample]) -> list[ModelInput]:
"""Process samples to model input.
Args:
samples (list[Sample]): The samples to process.
Returns:
list[ModelInput]: The processed model inputs.
"""
model_inputs = []
for sample in samples:
if "messages" in sample:
model_input = self.render_messages(sample["messages"], sample.get("tools"))
elif "chosen_messages" in sample and "rejected_messages" in sample:
chosen_input = self.render_messages(sample["chosen_messages"], sample.get("tools"))
rejected_input = self.render_messages(sample["rejected_messages"], sample.get("tools"))
chosen_input["token_type_ids"] = [1] * len(chosen_input["input_ids"])
rejected_input["token_type_ids"] = [2] * len(rejected_input["input_ids"])
model_input = ModelInput(
input_ids=chosen_input["input_ids"] + rejected_input["input_ids"],
attention_mask=chosen_input["attention_mask"] + rejected_input["attention_mask"],
labels=chosen_input["labels"] + rejected_input["labels"],
loss_weights=chosen_input["loss_weights"] + rejected_input["loss_weights"],
token_type_ids=chosen_input["token_type_ids"] + rejected_input["token_type_ids"],
)
if "position_ids" in chosen_input:
model_input["position_ids"] = np.concatenate(
[chosen_input["position_ids"], rejected_input["position_ids"]], axis=-1
)
else:
raise ValueError("No valid messages or chosen_messages/rejected_messages found in sample.")
if "extra_info" in sample:
model_input["extra_info"] = sample["extra_info"]
if "_dataset_name" in sample:
model_input["_dataset_name"] = sample["_dataset_name"]
model_inputs.append(model_input)
return model_inputs | def render_messages(
self, messages: list[Message], tools: str | None = None, is_generate: bool = False
) -> ModelInput:
"""Apply template to messages and convert them to model input.
Args:
messages (list[Message]): The messages to render.
tools (str | None, optional): The tools to use. Defaults to None.
is_generate (bool, optional): Whether to render for generation. Defaults to False.
Returns:
ModelInput: The rendered model input.
""" | Apply template to messages and convert them to model input.
Args:
messages (list[Message]): The messages to render.
tools (str | None, optional): The tools to use. Defaults to None.
is_generate (bool, optional): Whether to render for generation. Defaults to False.
Returns:
ModelInput: The rendered model input. | if self.template == "chatml":
return render_chatml_messages(self.processor, messages, tools, is_generate)
else:
from ...plugins.model_plugins.rendering import RenderingPlugin
return RenderingPlugin(self.template).render_messages(self.processor, messages, tools, is_generate) | def render_messages(
self, messages: list[Message], tools: str | None = None, is_generate: bool = False
) -> ModelInput:
"""Apply template to messages and convert them to model input.
Args:
messages (list[Message]): The messages to render.
tools (str | None, optional): The tools to use. Defaults to None.
is_generate (bool, optional): Whether to render for generation. Defaults to False.
Returns:
ModelInput: The rendered model input.
"""
if self.template == "chatml":
return render_chatml_messages(self.processor, messages, tools, is_generate)
else:
from ...plugins.model_plugins.rendering import RenderingPlugin
return RenderingPlugin(self.template).render_messages(self.processor, messages, tools, is_generate) | [{"test_file": "tests_v1/core/utils/test_rendering.py", "test_function": "test_chatml_rendering", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pytest\nfrom transformers import AutoTokenizer\n\nfrom llamafactory.v1.config import DataArguments\nfrom llamafactory.v1.core.data_engine import DataEngine\nfrom llamafactory.v1.core.utils.rendering import Renderer\nfrom llamafactory.v1.utils.types import Processor\n\n\ndef _get_input_ids(inputs: list | dict) -> list:\n if not isinstance(inputs, list):\n return inputs[\"input_ids\"]\n else:\n return inputs\n\n\nHF_MESSAGES = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is LLM?\"},\n {\"role\": \"assistant\", \"content\": \"LLM stands for Large Language Model.\"},\n]\n\nV1_MESSAGES = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is LLM?\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"LLM stands for Large Language Model.\"}]},\n]\n\nHF_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is 6*8?\"},\n {\n \"role\": 
\"assistant\",\n \"tool_calls\": [{\"type\": \"function\", \"function\": {\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}}],\n },\n {\"role\": \"tool\", \"content\": \"48.\"},\n {\"role\": \"assistant\", \"content\": \"The result of 6*8 is 48.\"},\n]\n\nV1_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is 6*8?\"}]},\n {\n \"role\": \"assistant\",\n \"content\": [{\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})}],\n \"loss_weight\": 0.0,\n },\n {\"role\": \"tool\", \"content\": [{\"type\": \"text\", \"value\": \"48.\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"The result of 6*8 is 48.\"}]},\n]\n\nV1_TOOLS = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"description\": \"A function that multiplies two numbers\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\"type\": \"number\", \"description\": \"The first number to multiply\"},\n \"b\": {\"type\": \"number\", \"description\": \"The second number to multiply\"},\n },\n \"required\": [\"a\", \"b\"],\n },\n },\n }\n]\n\n\ndef test_chatml_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=True))\n v1_inputs = renderer.render_messages(V1_MESSAGES[:-1], is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], 
add_generation_prompt=False))\n hf_inputs_full = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES, add_generation_prompt=False))\n v1_inputs_full = renderer.render_messages(V1_MESSAGES, is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_chatml_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n generated_text = \"LLM stands for Large Language Model.\"\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == V1_MESSAGES[-1]\n\n\n@pytest.mark.parametrize(\"num_samples\", [16])\ndef test_chatml_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/v1-sft-demo\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], is_generate=True)\n prefix = tokenizer.encode(\"<|im_start|>user\\n\", add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_qwen3_nothink_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=True)\n )\n v1_inputs 
= renderer.render_messages(V1_MESSAGES_WITH_TOOLS[:-1], tools=json.dumps(V1_TOOLS), is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=False)\n )\n hf_inputs_full = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS, tools=V1_TOOLS, add_generation_prompt=False)\n )\n v1_inputs_full = renderer.render_messages(V1_MESSAGES_WITH_TOOLS, tools=json.dumps(V1_TOOLS), is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_qwen3_nothink_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n generated_text = (\n \"<thinking>I need to use the multiply function to calculate 6*8.</thinking>\"\n \"Let me call the multiply function.\"\n '<tool_call>{\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}</tool_call>'\n )\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == {\n \"role\": \"assistant\",\n \"content\": [\n {\"type\": \"reasoning\", \"value\": \"I need to use the multiply function to calculate 6*8.\"},\n {\"type\": \"text\", \"value\": \"Let me call the multiply function.\"},\n {\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})},\n ],\n 
}\n\n\n@pytest.mark.parametrize(\"num_samples\", [8])\ndef test_qwen3_nothink_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/reason-tool-use-demo-1500\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], tools=data_engine[index][\"tools\"])\n prefix_text = (\n \"<|im_start|>system\\nYou are a methodical and expert assistant. \"\n \"Your primary goal is to solve user requests by leveraging a set of available tools. \"\n \"You must reason for the best course of action in a structured manner before responding.\\n\\n\"\n \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\n\"\n \"You are provided with function signatures within <tools></tools> XML tags:\\n<tools>\\n\"\n '{\"type\": \"function\", \"function\": {\"name\":'\n )\n prefix = tokenizer.encode(prefix_text, add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_process_sft_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [{\"messages\": V1_MESSAGES, \"extra_info\": \"test\", \"_dataset_name\": \"default\"}]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\ndef test_process_dpo_samples():\n tokenizer: Processor = 
AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [\n {\n \"chosen_messages\": V1_MESSAGES,\n \"rejected_messages\": V1_MESSAGES,\n \"extra_info\": \"test\",\n \"_dataset_name\": \"default\",\n }\n ]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs * 2\n assert model_inputs[0][\"token_type_ids\"] == [1] * len(hf_inputs) + [2] * len(hf_inputs)\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\nif __name__ == \"__main__\":\n \"\"\"\n python -m tests_v1.core.utils.test_rendering\n \"\"\"\n test_chatml_rendering()\n test_chatml_parse()\n test_chatml_rendering_remote(16)\n test_qwen3_nothink_rendering()\n test_qwen3_nothink_parse()\n test_qwen3_nothink_rendering_remote(16)\n test_process_sft_samples()\n test_process_dpo_samples()\n", "framework": "pytest", "test_command": "pytest tests_v1/core/utils/test_rendering.py::test_chatml_rendering -xvs"}, {"test_file": "tests_v1/core/utils/test_rendering.py", "test_function": "test_chatml_rendering_remote", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pytest\nfrom transformers import AutoTokenizer\n\nfrom llamafactory.v1.config 
import DataArguments\nfrom llamafactory.v1.core.data_engine import DataEngine\nfrom llamafactory.v1.core.utils.rendering import Renderer\nfrom llamafactory.v1.utils.types import Processor\n\n\ndef _get_input_ids(inputs: list | dict) -> list:\n if not isinstance(inputs, list):\n return inputs[\"input_ids\"]\n else:\n return inputs\n\n\nHF_MESSAGES = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is LLM?\"},\n {\"role\": \"assistant\", \"content\": \"LLM stands for Large Language Model.\"},\n]\n\nV1_MESSAGES = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is LLM?\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"LLM stands for Large Language Model.\"}]},\n]\n\nHF_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is 6*8?\"},\n {\n \"role\": \"assistant\",\n \"tool_calls\": [{\"type\": \"function\", \"function\": {\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}}],\n },\n {\"role\": \"tool\", \"content\": \"48.\"},\n {\"role\": \"assistant\", \"content\": \"The result of 6*8 is 48.\"},\n]\n\nV1_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is 6*8?\"}]},\n {\n \"role\": \"assistant\",\n \"content\": [{\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})}],\n \"loss_weight\": 0.0,\n },\n {\"role\": \"tool\", \"content\": [{\"type\": \"text\", \"value\": \"48.\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"The result of 6*8 is 48.\"}]},\n]\n\nV1_TOOLS = [\n {\n \"type\": 
\"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"description\": \"A function that multiplies two numbers\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\"type\": \"number\", \"description\": \"The first number to multiply\"},\n \"b\": {\"type\": \"number\", \"description\": \"The second number to multiply\"},\n },\n \"required\": [\"a\", \"b\"],\n },\n },\n }\n]\n\n\ndef test_chatml_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=True))\n v1_inputs = renderer.render_messages(V1_MESSAGES[:-1], is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=False))\n hf_inputs_full = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES, add_generation_prompt=False))\n v1_inputs_full = renderer.render_messages(V1_MESSAGES, is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_chatml_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n generated_text = \"LLM stands for Large Language Model.\"\n parsed_message = renderer.parse_message(generated_text)\n assert 
parsed_message == V1_MESSAGES[-1]\n\n\n@pytest.mark.parametrize(\"num_samples\", [16])\ndef test_chatml_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/v1-sft-demo\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], is_generate=True)\n prefix = tokenizer.encode(\"<|im_start|>user\\n\", add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_qwen3_nothink_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=True)\n )\n v1_inputs = renderer.render_messages(V1_MESSAGES_WITH_TOOLS[:-1], tools=json.dumps(V1_TOOLS), is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=False)\n )\n hf_inputs_full = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS, tools=V1_TOOLS, add_generation_prompt=False)\n )\n v1_inputs_full = renderer.render_messages(V1_MESSAGES_WITH_TOOLS, tools=json.dumps(V1_TOOLS), is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert 
v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_qwen3_nothink_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n generated_text = (\n \"<thinking>I need to use the multiply function to calculate 6*8.</thinking>\"\n \"Let me call the multiply function.\"\n '<tool_call>{\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}</tool_call>'\n )\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == {\n \"role\": \"assistant\",\n \"content\": [\n {\"type\": \"reasoning\", \"value\": \"I need to use the multiply function to calculate 6*8.\"},\n {\"type\": \"text\", \"value\": \"Let me call the multiply function.\"},\n {\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})},\n ],\n }\n\n\n@pytest.mark.parametrize(\"num_samples\", [8])\ndef test_qwen3_nothink_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/reason-tool-use-demo-1500\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], tools=data_engine[index][\"tools\"])\n prefix_text = (\n \"<|im_start|>system\\nYou are a methodical and expert assistant. \"\n \"Your primary goal is to solve user requests by leveraging a set of available tools. 
\"\n \"You must reason for the best course of action in a structured manner before responding.\\n\\n\"\n \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\n\"\n \"You are provided with function signatures within <tools></tools> XML tags:\\n<tools>\\n\"\n '{\"type\": \"function\", \"function\": {\"name\":'\n )\n prefix = tokenizer.encode(prefix_text, add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_process_sft_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [{\"messages\": V1_MESSAGES, \"extra_info\": \"test\", \"_dataset_name\": \"default\"}]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\ndef test_process_dpo_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [\n {\n \"chosen_messages\": V1_MESSAGES,\n \"rejected_messages\": V1_MESSAGES,\n \"extra_info\": \"test\",\n \"_dataset_name\": \"default\",\n }\n ]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs * 2\n assert model_inputs[0][\"token_type_ids\"] == [1] * len(hf_inputs) + [2] * len(hf_inputs)\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\nif __name__ == \"__main__\":\n \"\"\"\n python -m 
tests_v1.core.utils.test_rendering\n \"\"\"\n test_chatml_rendering()\n test_chatml_parse()\n test_chatml_rendering_remote(16)\n test_qwen3_nothink_rendering()\n test_qwen3_nothink_parse()\n test_qwen3_nothink_rendering_remote(16)\n test_process_sft_samples()\n test_process_dpo_samples()\n", "framework": "pytest", "test_command": "pytest tests_v1/core/utils/test_rendering.py::test_chatml_rendering_remote -xvs"}, {"test_file": "tests_v1/core/utils/test_rendering.py", "test_function": "test_qwen3_nothink_rendering", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pytest\nfrom transformers import AutoTokenizer\n\nfrom llamafactory.v1.config import DataArguments\nfrom llamafactory.v1.core.data_engine import DataEngine\nfrom llamafactory.v1.core.utils.rendering import Renderer\nfrom llamafactory.v1.utils.types import Processor\n\n\ndef _get_input_ids(inputs: list | dict) -> list:\n if not isinstance(inputs, list):\n return inputs[\"input_ids\"]\n else:\n return inputs\n\n\nHF_MESSAGES = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is LLM?\"},\n {\"role\": \"assistant\", \"content\": \"LLM stands for Large Language Model.\"},\n]\n\nV1_MESSAGES = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", 
\"value\": \"What is LLM?\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"LLM stands for Large Language Model.\"}]},\n]\n\nHF_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is 6*8?\"},\n {\n \"role\": \"assistant\",\n \"tool_calls\": [{\"type\": \"function\", \"function\": {\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}}],\n },\n {\"role\": \"tool\", \"content\": \"48.\"},\n {\"role\": \"assistant\", \"content\": \"The result of 6*8 is 48.\"},\n]\n\nV1_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is 6*8?\"}]},\n {\n \"role\": \"assistant\",\n \"content\": [{\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})}],\n \"loss_weight\": 0.0,\n },\n {\"role\": \"tool\", \"content\": [{\"type\": \"text\", \"value\": \"48.\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"The result of 6*8 is 48.\"}]},\n]\n\nV1_TOOLS = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"description\": \"A function that multiplies two numbers\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\"type\": \"number\", \"description\": \"The first number to multiply\"},\n \"b\": {\"type\": \"number\", \"description\": \"The second number to multiply\"},\n },\n \"required\": [\"a\", \"b\"],\n },\n },\n }\n]\n\n\ndef test_chatml_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=True))\n v1_inputs = renderer.render_messages(V1_MESSAGES[:-1], 
is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=False))\n hf_inputs_full = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES, add_generation_prompt=False))\n v1_inputs_full = renderer.render_messages(V1_MESSAGES, is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_chatml_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n generated_text = \"LLM stands for Large Language Model.\"\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == V1_MESSAGES[-1]\n\n\n@pytest.mark.parametrize(\"num_samples\", [16])\ndef test_chatml_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/v1-sft-demo\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], is_generate=True)\n prefix = tokenizer.encode(\"<|im_start|>user\\n\", add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef 
test_qwen3_nothink_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=True)\n )\n v1_inputs = renderer.render_messages(V1_MESSAGES_WITH_TOOLS[:-1], tools=json.dumps(V1_TOOLS), is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=False)\n )\n hf_inputs_full = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS, tools=V1_TOOLS, add_generation_prompt=False)\n )\n v1_inputs_full = renderer.render_messages(V1_MESSAGES_WITH_TOOLS, tools=json.dumps(V1_TOOLS), is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_qwen3_nothink_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n generated_text = (\n \"<thinking>I need to use the multiply function to calculate 6*8.</thinking>\"\n \"Let me call the multiply function.\"\n '<tool_call>{\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}</tool_call>'\n )\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == {\n \"role\": \"assistant\",\n 
\"content\": [\n {\"type\": \"reasoning\", \"value\": \"I need to use the multiply function to calculate 6*8.\"},\n {\"type\": \"text\", \"value\": \"Let me call the multiply function.\"},\n {\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})},\n ],\n }\n\n\n@pytest.mark.parametrize(\"num_samples\", [8])\ndef test_qwen3_nothink_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/reason-tool-use-demo-1500\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], tools=data_engine[index][\"tools\"])\n prefix_text = (\n \"<|im_start|>system\\nYou are a methodical and expert assistant. \"\n \"Your primary goal is to solve user requests by leveraging a set of available tools. 
\"\n \"You must reason for the best course of action in a structured manner before responding.\\n\\n\"\n \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\n\"\n \"You are provided with function signatures within <tools></tools> XML tags:\\n<tools>\\n\"\n '{\"type\": \"function\", \"function\": {\"name\":'\n )\n prefix = tokenizer.encode(prefix_text, add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_process_sft_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [{\"messages\": V1_MESSAGES, \"extra_info\": \"test\", \"_dataset_name\": \"default\"}]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\ndef test_process_dpo_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [\n {\n \"chosen_messages\": V1_MESSAGES,\n \"rejected_messages\": V1_MESSAGES,\n \"extra_info\": \"test\",\n \"_dataset_name\": \"default\",\n }\n ]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs * 2\n assert model_inputs[0][\"token_type_ids\"] == [1] * len(hf_inputs) + [2] * len(hf_inputs)\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\nif __name__ == \"__main__\":\n \"\"\"\n python -m 
tests_v1.core.utils.test_rendering\n \"\"\"\n test_chatml_rendering()\n test_chatml_parse()\n test_chatml_rendering_remote(16)\n test_qwen3_nothink_rendering()\n test_qwen3_nothink_parse()\n test_qwen3_nothink_rendering_remote(16)\n test_process_sft_samples()\n test_process_dpo_samples()\n", "framework": "pytest", "test_command": "pytest tests_v1/core/utils/test_rendering.py::test_qwen3_nothink_rendering -xvs"}, {"test_file": "tests_v1/core/utils/test_rendering.py", "test_function": "test_qwen3_nothink_rendering_remote", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pytest\nfrom transformers import AutoTokenizer\n\nfrom llamafactory.v1.config import DataArguments\nfrom llamafactory.v1.core.data_engine import DataEngine\nfrom llamafactory.v1.core.utils.rendering import Renderer\nfrom llamafactory.v1.utils.types import Processor\n\n\ndef _get_input_ids(inputs: list | dict) -> list:\n if not isinstance(inputs, list):\n return inputs[\"input_ids\"]\n else:\n return inputs\n\n\nHF_MESSAGES = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is LLM?\"},\n {\"role\": \"assistant\", \"content\": \"LLM stands for Large Language Model.\"},\n]\n\nV1_MESSAGES = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", 
\"value\": \"What is LLM?\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"LLM stands for Large Language Model.\"}]},\n]\n\nHF_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"What is 6*8?\"},\n {\n \"role\": \"assistant\",\n \"tool_calls\": [{\"type\": \"function\", \"function\": {\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}}],\n },\n {\"role\": \"tool\", \"content\": \"48.\"},\n {\"role\": \"assistant\", \"content\": \"The result of 6*8 is 48.\"},\n]\n\nV1_MESSAGES_WITH_TOOLS = [\n {\"role\": \"system\", \"content\": [{\"type\": \"text\", \"value\": \"You are a helpful assistant.\"}]},\n {\"role\": \"user\", \"content\": [{\"type\": \"text\", \"value\": \"What is 6*8?\"}]},\n {\n \"role\": \"assistant\",\n \"content\": [{\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})}],\n \"loss_weight\": 0.0,\n },\n {\"role\": \"tool\", \"content\": [{\"type\": \"text\", \"value\": \"48.\"}]},\n {\"role\": \"assistant\", \"content\": [{\"type\": \"text\", \"value\": \"The result of 6*8 is 48.\"}]},\n]\n\nV1_TOOLS = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"multiply\",\n \"description\": \"A function that multiplies two numbers\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"a\": {\"type\": \"number\", \"description\": \"The first number to multiply\"},\n \"b\": {\"type\": \"number\", \"description\": \"The second number to multiply\"},\n },\n \"required\": [\"a\", \"b\"],\n },\n },\n }\n]\n\n\ndef test_chatml_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=True))\n v1_inputs = renderer.render_messages(V1_MESSAGES[:-1], 
is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES[:-1], add_generation_prompt=False))\n hf_inputs_full = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES, add_generation_prompt=False))\n v1_inputs_full = renderer.render_messages(V1_MESSAGES, is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_chatml_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n generated_text = \"LLM stands for Large Language Model.\"\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == V1_MESSAGES[-1]\n\n\n@pytest.mark.parametrize(\"num_samples\", [16])\ndef test_chatml_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/v1-sft-demo\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], is_generate=True)\n prefix = tokenizer.encode(\"<|im_start|>user\\n\", add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef 
test_qwen3_nothink_rendering():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n\n hf_inputs = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=True)\n )\n v1_inputs = renderer.render_messages(V1_MESSAGES_WITH_TOOLS[:-1], tools=json.dumps(V1_TOOLS), is_generate=True)\n assert v1_inputs[\"input_ids\"] == hf_inputs\n assert v1_inputs[\"attention_mask\"] == [1] * len(hf_inputs)\n assert v1_inputs[\"labels\"] == [-100] * len(hf_inputs)\n assert v1_inputs[\"loss_weights\"] == [0.0] * len(hf_inputs)\n\n hf_inputs_part = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS[:-1], tools=V1_TOOLS, add_generation_prompt=False)\n )\n hf_inputs_full = _get_input_ids(\n tokenizer.apply_chat_template(HF_MESSAGES_WITH_TOOLS, tools=V1_TOOLS, add_generation_prompt=False)\n )\n v1_inputs_full = renderer.render_messages(V1_MESSAGES_WITH_TOOLS, tools=json.dumps(V1_TOOLS), is_generate=False)\n assert v1_inputs_full[\"input_ids\"] == hf_inputs_full\n assert v1_inputs_full[\"attention_mask\"] == [1] * len(hf_inputs_full)\n assert v1_inputs_full[\"labels\"] == [-100] * len(hf_inputs_part) + hf_inputs_full[len(hf_inputs_part) :]\n assert v1_inputs_full[\"loss_weights\"] == [0.0] * len(hf_inputs_part) + [1.0] * (\n len(hf_inputs_full) - len(hf_inputs_part)\n )\n\n\ndef test_qwen3_nothink_parse():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n generated_text = (\n \"<thinking>I need to use the multiply function to calculate 6*8.</thinking>\"\n \"Let me call the multiply function.\"\n '<tool_call>{\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}}</tool_call>'\n )\n parsed_message = renderer.parse_message(generated_text)\n assert parsed_message == {\n \"role\": \"assistant\",\n 
\"content\": [\n {\"type\": \"reasoning\", \"value\": \"I need to use the multiply function to calculate 6*8.\"},\n {\"type\": \"text\", \"value\": \"Let me call the multiply function.\"},\n {\"type\": \"tool_call\", \"value\": json.dumps({\"name\": \"multiply\", \"arguments\": {\"a\": 6, \"b\": 8}})},\n ],\n }\n\n\n@pytest.mark.parametrize(\"num_samples\", [8])\ndef test_qwen3_nothink_rendering_remote(num_samples: int):\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-4B-Instruct-2507\")\n renderer = Renderer(template=\"qwen3_nothink\", processor=tokenizer)\n data_args = DataArguments(train_dataset=\"llamafactory/reason-tool-use-demo-1500\")\n data_engine = DataEngine(data_args.train_dataset)\n for index in range(num_samples):\n v1_inputs = renderer.render_messages(data_engine[index][\"messages\"], tools=data_engine[index][\"tools\"])\n prefix_text = (\n \"<|im_start|>system\\nYou are a methodical and expert assistant. \"\n \"Your primary goal is to solve user requests by leveraging a set of available tools. 
\"\n \"You must reason for the best course of action in a structured manner before responding.\\n\\n\"\n \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\n\"\n \"You are provided with function signatures within <tools></tools> XML tags:\\n<tools>\\n\"\n '{\"type\": \"function\", \"function\": {\"name\":'\n )\n prefix = tokenizer.encode(prefix_text, add_special_tokens=False)\n print(tokenizer.decode(v1_inputs[\"input_ids\"][: len(prefix)]))\n assert v1_inputs[\"input_ids\"][: len(prefix)] == prefix\n\n\ndef test_process_sft_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [{\"messages\": V1_MESSAGES, \"extra_info\": \"test\", \"_dataset_name\": \"default\"}]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\ndef test_process_dpo_samples():\n tokenizer: Processor = AutoTokenizer.from_pretrained(\"llamafactory/tiny-random-qwen3\")\n renderer = Renderer(template=\"chatml\", processor=tokenizer)\n hf_inputs = _get_input_ids(tokenizer.apply_chat_template(HF_MESSAGES))\n\n samples = [\n {\n \"chosen_messages\": V1_MESSAGES,\n \"rejected_messages\": V1_MESSAGES,\n \"extra_info\": \"test\",\n \"_dataset_name\": \"default\",\n }\n ]\n model_inputs = renderer.process_samples(samples)\n assert len(model_inputs) == 1\n assert model_inputs[0][\"input_ids\"] == hf_inputs * 2\n assert model_inputs[0][\"token_type_ids\"] == [1] * len(hf_inputs) + [2] * len(hf_inputs)\n assert model_inputs[0][\"extra_info\"] == \"test\"\n assert model_inputs[0][\"_dataset_name\"] == \"default\"\n\n\nif __name__ == \"__main__\":\n \"\"\"\n python -m 
tests_v1.core.utils.test_rendering\n \"\"\"\n test_chatml_rendering()\n test_chatml_parse()\n test_chatml_rendering_remote(16)\n test_qwen3_nothink_rendering()\n test_qwen3_nothink_parse()\n test_qwen3_nothink_rendering_remote(16)\n test_process_sft_samples()\n test_process_dpo_samples()\n", "framework": "pytest", "test_command": "pytest tests_v1/core/utils/test_rendering.py::test_qwen3_nothink_rendering_remote -xvs"}] | {"repo_url": "https://github.com/hiyouga/LlamaFactory", "install_cmd": "pip install -e .", "commit_sha": "c0245c43fc1fbb87ed6b2f2d28bdcceed5103946", "frozen_requirements": "frozen_requirements/hiyouga_LlamaFactory.txt"} | {"body_lines": 5, "file_lines": 170, "has_docstring": true, "num_tests": 4} | {"status": "passed", "tests_run": 4} | repo_patch/0031 | file_overlap |
repo_patch/0023 | hiyouga/LlamaFactory | src/llamafactory/v1/plugins/model_plugins/peft.py | load_adapter | load_adapter | function | null | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Literal, TypedDict, Union
import torch
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from ...config import InputArgument, get_args
from ...core.model_engine import ModelEngine
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import HFModel
logger = logging.get_logger(__name__)
class LoraConfigDict(TypedDict, total=False):
name: Literal["lora"]
"""Plugin name."""
r: int
"""Lora rank."""
lora_alpha: int
"""Lora alpha."""
lora_dropout: float
"""Lora dropout."""
target_modules: Union[list[str], str]
"""Target modules."""
use_rslora: bool
"""Use RS-LoRA."""
use_dora: bool
"""Use DoRA."""
modules_to_save: list[str]
"""Modules to save."""
adapter_name_or_path: Union[list[str], str]
"""Path to the adapter(s)."""
export_dir: str
"""Path to the export directory."""
export_size: int
"""Shard size for the export model."""
export_hub_model_id: str
"""Hub model ID for the export model."""
infer_dtype: Literal["auto", "float16", "float32", "bfloat16"]
"""Inference data type for the export model."""
export_legacy_format: bool
"""Use legacy format for the export model."""
class FreezeConfigDict(TypedDict, total=False):
name: Literal["freeze"]
"""Plugin name."""
freeze_trainable_layers: int
"""Freeze trainable layers."""
freeze_trainable_modules: Union[list[str], str]
"""Freeze trainable modules."""
freeze_extra_modules: list[str]
"""Freeze extra modules."""
cast_trainable_params_to_fp32: bool
"""Cast trainable params to fp32."""
class PeftPlugin(BasePlugin):
def __call__(self, model: HFModel, config: dict, is_train: bool) -> HFModel:
return super().__call__(model, config, is_train)
def _find_all_linear_modules(model: HFModel) -> list[str]:
r"""Find all available modules to apply LoRA."""
forbidden_modules = {"lm_head", "output_layer", "output"}
module_names = set()
for name, module in model.named_modules():
if any(forbidden_module in name for forbidden_module in forbidden_modules):
continue
if "Linear" in module.__class__.__name__ and "Embedding" not in module.__class__.__name__:
module_names.add(name.split(".")[-1])
return list(module_names)
def merge_adapters(model: HFModel, adapter_name_or_path: Union[list[str], str]) -> HFModel:
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
for adapter_path in adapter_name_or_path:
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
logger.info_rank0(f"Merged adapter from {adapter_path}")
return model
def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
r"""Loads adapter(s) into the model.
Determine adapter usage based on mode:
- Training: Load the single adapter for continued training.
- Inference: Merge all adapters to clean up the model.
- Unmergeable: Keep the single adapter active without merging.
"""
# TODO: Implement this function
@PeftPlugin("lora").register()
def get_lora_model(model: HFModel, config: LoraConfigDict, is_train: bool = False) -> HFModel:
adapter_name_or_path = config.get("adapter_name_or_path")
if adapter_name_or_path:
return load_adapter(model, adapter_name_or_path, is_train)
logger.info_rank0("Fine-tuning method: LoRA")
target_modules = config.get("target_modules", "all")
# Handle target modules
if target_modules == "all":
target_modules = _find_all_linear_modules(model)
elif isinstance(target_modules, str):
target_modules = [target_modules]
logger.info_rank0(f"LoRA target modules: {target_modules}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=not is_train,
r=config.get("r", 8),
lora_alpha=config.get("lora_alpha", 16),
lora_dropout=config.get("lora_dropout", 0.05),
use_rslora=config.get("use_rslora", False),
use_dora=config.get("use_dora", False),
target_modules=target_modules,
modules_to_save=config.get("modules_to_save", None),
)
model = get_peft_model(model, peft_config)
if is_train:
model.print_trainable_parameters()
return model
@PeftPlugin("freeze").register()
def get_freeze_model(model: HFModel, config: FreezeConfigDict, is_train: bool = False) -> HFModel:
logger.info_rank0("Fine-tuning method: Freeze")
if not is_train:
return model
freeze_trainable_layers = config.get("freeze_trainable_layers", 2)
freeze_trainable_modules = config.get("freeze_trainable_modules", ["all"])
freeze_extra_modules = config.get("freeze_extra_modules", [])
cast_trainable_params_to_fp32 = config.get("cast_trainable_params_to_fp32", True)
if isinstance(freeze_trainable_modules, str):
freeze_trainable_modules = [module.strip() for module in freeze_trainable_modules.split(",")]
if isinstance(freeze_extra_modules, str):
freeze_extra_modules = [module.strip() for module in freeze_extra_modules.split(",")]
# Get number of layers
num_layers = (
getattr(model.config, "num_hidden_layers", None)
or getattr(model.config, "num_layers", None)
or getattr(model.config, "n_layer", None)
)
if not num_layers:
raise ValueError("Current model does not support freeze tuning.")
if freeze_trainable_layers > 0:
# last n layers
trainable_layer_ids = range(max(0, num_layers - freeze_trainable_layers), num_layers)
else:
# first n layers
trainable_layer_ids = range(min(-freeze_trainable_layers, num_layers))
# Identify hidden and non-hidden modules
hidden_modules = set()
non_hidden_modules = set()
for name, _ in model.named_parameters():
if ".0." in name:
hidden_modules.add(name.split(".0.")[-1].split(".")[0])
elif ".1." in name:
hidden_modules.add(name.split(".1.")[-1].split(".")[0])
if re.search(r"\.\d+\.", name) is None:
non_hidden_modules.add(name.split(".")[-2])
# Build list of trainable layer patterns
trainable_layers = []
for module_name in freeze_trainable_modules:
if module_name == "all":
for idx in trainable_layer_ids:
trainable_layers.append(f".{idx:d}.")
elif module_name in hidden_modules:
for idx in trainable_layer_ids:
trainable_layers.append(f".{idx:d}.{module_name}")
else:
raise ValueError(f"Module {module_name} not found in hidden modules: {hidden_modules}")
# Add extra modules
if freeze_extra_modules:
for module_name in freeze_extra_modules:
if module_name in non_hidden_modules:
trainable_layers.append(module_name)
else:
raise ValueError(f"Module {module_name} not found in non-hidden modules: {non_hidden_modules}")
# TODO
# Multi-modal special handling
# Set requires_grad
forbidden_modules = {"quant_state", "quantization_weight", "qweight", "qzeros", "scales"}
for name, param in model.named_parameters():
if any(trainable_layer in name for trainable_layer in trainable_layers) and not any(
forbidden_module in name for forbidden_module in forbidden_modules
):
param.requires_grad_(True)
if cast_trainable_params_to_fp32:
param.data = param.data.to(torch.float32) # Cast to fp32 for stability
else:
param.requires_grad_(False)
logger.info_rank0(f"Set trainable layers: {trainable_layers}")
# Count trainable params for verification
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
all_params = sum(p.numel() for p in model.parameters())
logger.info_rank0(
f"trainable params: {trainable_params} || all params: {all_params} || trainable%: {100 * trainable_params / all_params:.4f}"
)
return model
def merge_and_export_model(args: InputArgument = None):
model_args, _, _, _ = get_args(args)
export_config = model_args.peft_config
if export_config is None:
raise ValueError("Please specify peft_config to merge and export model.")
export_dir = export_config.get("export_dir")
if export_dir is None:
raise ValueError("Please specify export_dir.")
export_size = export_config.get("export_size", 5)
export_hub_model_id = export_config.get("export_hub_model_id")
infer_dtype = export_config.get("infer_dtype", "auto")
export_legacy_format = export_config.get("export_legacy_format", False)
adapters = None
if export_config.get("name") == "lora":
adapters = export_config.get("adapter_name_or_path")
else:
raise ValueError("Currently merge and export model function is only supported for lora.")
if adapters is None:
raise ValueError("Please set adapter_name_or_path to merge adapters into base model.")
logger.info_rank0("Loading model for export...")
model_engine = ModelEngine(model_args, is_train=False)
model = model_engine.model
tokenizer = model_engine.processor
if infer_dtype == "auto":
if model.config.torch_dtype == torch.float32 and torch.cuda.is_bf16_supported():
model = model.to(torch.bfloat16)
logger.info_rank0("Converted model to bfloat16.")
else:
target_dtype = getattr(torch, infer_dtype)
model = model.to(target_dtype)
logger.info_rank0(f"Converted model to {infer_dtype}.")
logger.info_rank0(f"Exporting model to {export_dir}...")
model.save_pretrained(
export_dir,
max_shard_size=f"{export_size}GB",
safe_serialization=not export_legacy_format,
)
if tokenizer is not None:
try:
if hasattr(tokenizer, "padding_side"):
tokenizer.padding_side = "left"
tokenizer.save_pretrained(export_dir)
except Exception as e:
logger.warning(f"Failed to save tokenizer: {e}")
if export_hub_model_id:
logger.info_rank0(f"Pushing to hub: {export_hub_model_id}...")
model.push_to_hub(export_hub_model_id)
if tokenizer is not None:
tokenizer.push_to_hub(export_hub_model_id)
logger.info_rank0("Model exported successfully.") | def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
r"""Loads adapter(s) into the model.
Determine adapter usage based on mode:
- Training: Load the single adapter for continued training.
- Inference: Merge all adapters to clean up the model.
- Unmergeable: Keep the single adapter active without merging.
""" | Loads adapter(s) into the model.
Determine adapter usage based on mode:
- Training: Load the single adapter for continued training.
- Inference: Merge all adapters to clean up the model.
- Unmergeable: Keep the single adapter active without merging. | if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
# TODO
# Adapters fix for deepspeed and quant
# Adapters fix for vision
if is_train and len(adapter_name_or_path) > 1:
raise ValueError(
"When `adapter_name_or_path` is provided for training, only a single LoRA adapter is supported. "
"Training will continue on the specified adapter. "
"Please merge multiple adapters before starting a new LoRA adapter."
)
if is_train:
adapter_to_merge = []
adapter_to_resume = adapter_name_or_path[0]
else:
adapter_to_merge = adapter_name_or_path
adapter_to_resume = None
if adapter_to_merge:
model = merge_adapters(model, adapter_to_merge)
if adapter_to_resume is not None:
model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_train)
if is_train:
logger.info_rank0(
f"Resuming training from existing LoRA adapter at {adapter_to_resume}. "
"LoRA hyperparameters will be loaded from the adapter itself; "
"the current LoRA configuration will be ignored. "
"Merge the adapter into the base model before training if you want to start a new adapter."
)
return model | def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
r"""Loads adapter(s) into the model.
Determine adapter usage based on mode:
- Training: Load the single adapter for continued training.
- Inference: Merge all adapters to clean up the model.
- Unmergeable: Keep the single adapter active without merging.
"""
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
# TODO
# Adapters fix for deepspeed and quant
# Adapters fix for vision
if is_train and len(adapter_name_or_path) > 1:
raise ValueError(
"When `adapter_name_or_path` is provided for training, only a single LoRA adapter is supported. "
"Training will continue on the specified adapter. "
"Please merge multiple adapters before starting a new LoRA adapter."
)
if is_train:
adapter_to_merge = []
adapter_to_resume = adapter_name_or_path[0]
else:
adapter_to_merge = adapter_name_or_path
adapter_to_resume = None
if adapter_to_merge:
model = merge_adapters(model, adapter_to_merge)
if adapter_to_resume is not None:
model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_train)
if is_train:
logger.info_rank0(
f"Resuming training from existing LoRA adapter at {adapter_to_resume}. "
"LoRA hyperparameters will be loaded from the adapter itself; "
"the current LoRA configuration will be ignored. "
"Merge the adapter into the base model before training if you want to start a new adapter."
)
return model | [{"test_file": "tests_v1/plugins/model_plugins/test_peft.py", "test_function": "test_load_adapter_single_for_inference", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom peft import LoraConfig, PeftModel, get_peft_model\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom llamafactory.v1.plugins.model_plugins import peft as peft_module\nfrom llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model\n\n\nTINY_MODEL = \"llamafactory/tiny-random-qwen3\"\n\n\n@pytest.fixture(scope=\"module\")\ndef model_path():\n return TINY_MODEL\n\n\n@pytest.fixture(scope=\"function\")\ndef model(model_path):\n return AutoModelForCausalLM.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef tokenizer(model_path):\n return AutoTokenizer.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef adapter_path(tmp_path):\n # Create a dummy adapter\n lora_config = LoraConfig(\n r=8,\n lora_alpha=16,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)\n peft_model = get_peft_model(base_model, lora_config)\n save_path = tmp_path / \"test_adapter\"\n peft_model.save_pretrained(save_path)\n return str(save_path)\n\n\ndef test_find_all_linear_modules(model):\n \"\"\"Verify linear modules are discoverable and 
include q_proj / v_proj for tiny-random-qwen3.\"\"\"\n modules = peft_module._find_all_linear_modules(model)\n expected_subset = {\"q_proj\", \"v_proj\"}\n assert expected_subset.issubset(set(modules))\n\n\ndef test_get_lora_model(model):\n \"\"\"Verify a PeftModel is returned and LoRA config takes effect.\"\"\"\n config = {\"name\": \"lora\", \"r\": 8, \"target_modules\": \"all\", \"lora_alpha\": 16}\n model = peft_module.get_lora_model(model, config, is_train=True)\n assert isinstance(model, PeftModel)\n assert model.peft_config[\"default\"].r == 8\n assert \"q_proj\" in model.peft_config[\"default\"].target_modules\n\n\ndef test_get_freeze_model_layers(model):\n \"\"\"Verify layer-wise freezing: only the last layer stays trainable.\"\"\"\n # Freeze all but last layer\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"all\"}\n\n # Ensure we start with something known\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n assert num_layers > 0\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n elif \"layers.0\" in name and num_layers > 1:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_get_freeze_model_modules(model):\n \"\"\"Verify module-wise freezing: only last-layer self_attn is trainable.\"\"\"\n # Freeze specific modules (e.g. 
only self_attn)\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"self_attn\"}\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name and \"self_attn\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n else:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_load_adapter_single_for_inference(model, adapter_path):\n \"\"\"Verify single adapter is merged+unloaded in inference mode.\"\"\"\n # Test loading single adapter for inference (merge and unload)\n model_result = peft_module.load_adapter(model, adapter_path, is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_resume_train(model, adapter_path):\n \"\"\"Verify training mode returns a trainable PeftModel.\"\"\"\n # Test loading for training\n model_result = peft_module.load_adapter(model, adapter_path, is_train=True)\n assert isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_train_multiple_disallowed(model, adapter_path):\n \"\"\"Verify multiple adapters are rejected in training mode.\"\"\"\n with pytest.raises(ValueError, match=\"only a single LoRA adapter\"):\n peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)\n\n\ndef test_load_adapter_infer_multiple_merges(model, adapter_path):\n \"\"\"Verify multiple adapters are merged in inference mode.\"\"\"\n # Test merging multiple adapters\n model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_merge_and_export_model(tmp_path, adapter_path):\n \"\"\"Verify merge_and_export_model produces export artifacts.\"\"\"\n export_dir = tmp_path / \"export\"\n\n args_dict = {\n \"model\": TINY_MODEL,\n \"peft_config\": {\n \"name\": \"lora\",\n 
\"adapter_name_or_path\": adapter_path,\n \"export_dir\": str(export_dir),\n \"export_size\": 1,\n \"infer_dtype\": \"float16\",\n },\n }\n\n merge_and_export_model(args_dict)\n\n assert export_dir.exists()\n assert (export_dir / \"config.json\").exists()\n assert (export_dir / \"model.safetensors\").exists()\n assert (export_dir / \"tokenizer_config.json\").exists()\n", "framework": "pytest", "test_command": "pytest tests_v1/plugins/model_plugins/test_peft.py::test_load_adapter_single_for_inference -xvs"}, {"test_file": "tests_v1/plugins/model_plugins/test_peft.py", "test_function": "test_load_adapter_resume_train", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom peft import LoraConfig, PeftModel, get_peft_model\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom llamafactory.v1.plugins.model_plugins import peft as peft_module\nfrom llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model\n\n\nTINY_MODEL = \"llamafactory/tiny-random-qwen3\"\n\n\n@pytest.fixture(scope=\"module\")\ndef model_path():\n return TINY_MODEL\n\n\n@pytest.fixture(scope=\"function\")\ndef model(model_path):\n return AutoModelForCausalLM.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef tokenizer(model_path):\n return AutoTokenizer.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef adapter_path(tmp_path):\n # Create a dummy adapter\n 
lora_config = LoraConfig(\n r=8,\n lora_alpha=16,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)\n peft_model = get_peft_model(base_model, lora_config)\n save_path = tmp_path / \"test_adapter\"\n peft_model.save_pretrained(save_path)\n return str(save_path)\n\n\ndef test_find_all_linear_modules(model):\n \"\"\"Verify linear modules are discoverable and include q_proj / v_proj for tiny-random-qwen3.\"\"\"\n modules = peft_module._find_all_linear_modules(model)\n expected_subset = {\"q_proj\", \"v_proj\"}\n assert expected_subset.issubset(set(modules))\n\n\ndef test_get_lora_model(model):\n \"\"\"Verify a PeftModel is returned and LoRA config takes effect.\"\"\"\n config = {\"name\": \"lora\", \"r\": 8, \"target_modules\": \"all\", \"lora_alpha\": 16}\n model = peft_module.get_lora_model(model, config, is_train=True)\n assert isinstance(model, PeftModel)\n assert model.peft_config[\"default\"].r == 8\n assert \"q_proj\" in model.peft_config[\"default\"].target_modules\n\n\ndef test_get_freeze_model_layers(model):\n \"\"\"Verify layer-wise freezing: only the last layer stays trainable.\"\"\"\n # Freeze all but last layer\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"all\"}\n\n # Ensure we start with something known\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n assert num_layers > 0\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n elif \"layers.0\" in name and num_layers > 1:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_get_freeze_model_modules(model):\n \"\"\"Verify module-wise freezing: only last-layer self_attn is trainable.\"\"\"\n # Freeze specific modules (e.g. 
only self_attn)\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"self_attn\"}\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name and \"self_attn\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n else:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_load_adapter_single_for_inference(model, adapter_path):\n \"\"\"Verify single adapter is merged+unloaded in inference mode.\"\"\"\n # Test loading single adapter for inference (merge and unload)\n model_result = peft_module.load_adapter(model, adapter_path, is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_resume_train(model, adapter_path):\n \"\"\"Verify training mode returns a trainable PeftModel.\"\"\"\n # Test loading for training\n model_result = peft_module.load_adapter(model, adapter_path, is_train=True)\n assert isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_train_multiple_disallowed(model, adapter_path):\n \"\"\"Verify multiple adapters are rejected in training mode.\"\"\"\n with pytest.raises(ValueError, match=\"only a single LoRA adapter\"):\n peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)\n\n\ndef test_load_adapter_infer_multiple_merges(model, adapter_path):\n \"\"\"Verify multiple adapters are merged in inference mode.\"\"\"\n # Test merging multiple adapters\n model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_merge_and_export_model(tmp_path, adapter_path):\n \"\"\"Verify merge_and_export_model produces export artifacts.\"\"\"\n export_dir = tmp_path / \"export\"\n\n args_dict = {\n \"model\": TINY_MODEL,\n \"peft_config\": {\n \"name\": \"lora\",\n 
\"adapter_name_or_path\": adapter_path,\n \"export_dir\": str(export_dir),\n \"export_size\": 1,\n \"infer_dtype\": \"float16\",\n },\n }\n\n merge_and_export_model(args_dict)\n\n assert export_dir.exists()\n assert (export_dir / \"config.json\").exists()\n assert (export_dir / \"model.safetensors\").exists()\n assert (export_dir / \"tokenizer_config.json\").exists()\n", "framework": "pytest", "test_command": "pytest tests_v1/plugins/model_plugins/test_peft.py::test_load_adapter_resume_train -xvs"}, {"test_file": "tests_v1/plugins/model_plugins/test_peft.py", "test_function": "test_load_adapter_train_multiple_disallowed", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom peft import LoraConfig, PeftModel, get_peft_model\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom llamafactory.v1.plugins.model_plugins import peft as peft_module\nfrom llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model\n\n\nTINY_MODEL = \"llamafactory/tiny-random-qwen3\"\n\n\n@pytest.fixture(scope=\"module\")\ndef model_path():\n return TINY_MODEL\n\n\n@pytest.fixture(scope=\"function\")\ndef model(model_path):\n return AutoModelForCausalLM.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef tokenizer(model_path):\n return AutoTokenizer.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef adapter_path(tmp_path):\n # Create a dummy 
adapter\n lora_config = LoraConfig(\n r=8,\n lora_alpha=16,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)\n peft_model = get_peft_model(base_model, lora_config)\n save_path = tmp_path / \"test_adapter\"\n peft_model.save_pretrained(save_path)\n return str(save_path)\n\n\ndef test_find_all_linear_modules(model):\n \"\"\"Verify linear modules are discoverable and include q_proj / v_proj for tiny-random-qwen3.\"\"\"\n modules = peft_module._find_all_linear_modules(model)\n expected_subset = {\"q_proj\", \"v_proj\"}\n assert expected_subset.issubset(set(modules))\n\n\ndef test_get_lora_model(model):\n \"\"\"Verify a PeftModel is returned and LoRA config takes effect.\"\"\"\n config = {\"name\": \"lora\", \"r\": 8, \"target_modules\": \"all\", \"lora_alpha\": 16}\n model = peft_module.get_lora_model(model, config, is_train=True)\n assert isinstance(model, PeftModel)\n assert model.peft_config[\"default\"].r == 8\n assert \"q_proj\" in model.peft_config[\"default\"].target_modules\n\n\ndef test_get_freeze_model_layers(model):\n \"\"\"Verify layer-wise freezing: only the last layer stays trainable.\"\"\"\n # Freeze all but last layer\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"all\"}\n\n # Ensure we start with something known\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n assert num_layers > 0\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n elif \"layers.0\" in name and num_layers > 1:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_get_freeze_model_modules(model):\n \"\"\"Verify module-wise freezing: only last-layer self_attn is trainable.\"\"\"\n # Freeze specific modules (e.g. 
only self_attn)\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"self_attn\"}\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name and \"self_attn\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n else:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_load_adapter_single_for_inference(model, adapter_path):\n \"\"\"Verify single adapter is merged+unloaded in inference mode.\"\"\"\n # Test loading single adapter for inference (merge and unload)\n model_result = peft_module.load_adapter(model, adapter_path, is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_resume_train(model, adapter_path):\n \"\"\"Verify training mode returns a trainable PeftModel.\"\"\"\n # Test loading for training\n model_result = peft_module.load_adapter(model, adapter_path, is_train=True)\n assert isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_train_multiple_disallowed(model, adapter_path):\n \"\"\"Verify multiple adapters are rejected in training mode.\"\"\"\n with pytest.raises(ValueError, match=\"only a single LoRA adapter\"):\n peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)\n\n\ndef test_load_adapter_infer_multiple_merges(model, adapter_path):\n \"\"\"Verify multiple adapters are merged in inference mode.\"\"\"\n # Test merging multiple adapters\n model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_merge_and_export_model(tmp_path, adapter_path):\n \"\"\"Verify merge_and_export_model produces export artifacts.\"\"\"\n export_dir = tmp_path / \"export\"\n\n args_dict = {\n \"model\": TINY_MODEL,\n \"peft_config\": {\n \"name\": \"lora\",\n 
\"adapter_name_or_path\": adapter_path,\n \"export_dir\": str(export_dir),\n \"export_size\": 1,\n \"infer_dtype\": \"float16\",\n },\n }\n\n merge_and_export_model(args_dict)\n\n assert export_dir.exists()\n assert (export_dir / \"config.json\").exists()\n assert (export_dir / \"model.safetensors\").exists()\n assert (export_dir / \"tokenizer_config.json\").exists()\n", "framework": "pytest", "test_command": "pytest tests_v1/plugins/model_plugins/test_peft.py::test_load_adapter_train_multiple_disallowed -xvs"}, {"test_file": "tests_v1/plugins/model_plugins/test_peft.py", "test_function": "test_load_adapter_infer_multiple_merges", "test_content": "# Copyright 2025 the LlamaFactory team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nfrom peft import LoraConfig, PeftModel, get_peft_model\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nfrom llamafactory.v1.plugins.model_plugins import peft as peft_module\nfrom llamafactory.v1.plugins.model_plugins.peft import merge_and_export_model\n\n\nTINY_MODEL = \"llamafactory/tiny-random-qwen3\"\n\n\n@pytest.fixture(scope=\"module\")\ndef model_path():\n return TINY_MODEL\n\n\n@pytest.fixture(scope=\"function\")\ndef model(model_path):\n return AutoModelForCausalLM.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef tokenizer(model_path):\n return AutoTokenizer.from_pretrained(model_path)\n\n\n@pytest.fixture(scope=\"function\")\ndef adapter_path(tmp_path):\n # Create a 
dummy adapter\n lora_config = LoraConfig(\n r=8,\n lora_alpha=16,\n target_modules=[\"q_proj\", \"v_proj\"],\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n base_model = AutoModelForCausalLM.from_pretrained(TINY_MODEL)\n peft_model = get_peft_model(base_model, lora_config)\n save_path = tmp_path / \"test_adapter\"\n peft_model.save_pretrained(save_path)\n return str(save_path)\n\n\ndef test_find_all_linear_modules(model):\n \"\"\"Verify linear modules are discoverable and include q_proj / v_proj for tiny-random-qwen3.\"\"\"\n modules = peft_module._find_all_linear_modules(model)\n expected_subset = {\"q_proj\", \"v_proj\"}\n assert expected_subset.issubset(set(modules))\n\n\ndef test_get_lora_model(model):\n \"\"\"Verify a PeftModel is returned and LoRA config takes effect.\"\"\"\n config = {\"name\": \"lora\", \"r\": 8, \"target_modules\": \"all\", \"lora_alpha\": 16}\n model = peft_module.get_lora_model(model, config, is_train=True)\n assert isinstance(model, PeftModel)\n assert model.peft_config[\"default\"].r == 8\n assert \"q_proj\" in model.peft_config[\"default\"].target_modules\n\n\ndef test_get_freeze_model_layers(model):\n \"\"\"Verify layer-wise freezing: only the last layer stays trainable.\"\"\"\n # Freeze all but last layer\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"all\"}\n\n # Ensure we start with something known\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n assert num_layers > 0\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n elif \"layers.0\" in name and num_layers > 1:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_get_freeze_model_modules(model):\n \"\"\"Verify module-wise freezing: only last-layer self_attn is trainable.\"\"\"\n # Freeze specific modules 
(e.g. only self_attn)\n config = {\"name\": \"freeze\", \"freeze_trainable_layers\": 1, \"freeze_trainable_modules\": \"self_attn\"}\n model = peft_module.get_freeze_model(model, config, is_train=True)\n\n num_layers = model.config.num_hidden_layers\n\n for name, param in model.named_parameters():\n if f\"layers.{num_layers - 1}\" in name and \"self_attn\" in name:\n assert param.requires_grad, f\"{name} should be trainable\"\n else:\n assert not param.requires_grad, f\"{name} should be frozen\"\n\n\ndef test_load_adapter_single_for_inference(model, adapter_path):\n \"\"\"Verify single adapter is merged+unloaded in inference mode.\"\"\"\n # Test loading single adapter for inference (merge and unload)\n model_result = peft_module.load_adapter(model, adapter_path, is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_resume_train(model, adapter_path):\n \"\"\"Verify training mode returns a trainable PeftModel.\"\"\"\n # Test loading for training\n model_result = peft_module.load_adapter(model, adapter_path, is_train=True)\n assert isinstance(model_result, PeftModel)\n\n\ndef test_load_adapter_train_multiple_disallowed(model, adapter_path):\n \"\"\"Verify multiple adapters are rejected in training mode.\"\"\"\n with pytest.raises(ValueError, match=\"only a single LoRA adapter\"):\n peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=True)\n\n\ndef test_load_adapter_infer_multiple_merges(model, adapter_path):\n \"\"\"Verify multiple adapters are merged in inference mode.\"\"\"\n # Test merging multiple adapters\n model_result = peft_module.load_adapter(model, [adapter_path, adapter_path], is_train=False)\n assert not isinstance(model_result, PeftModel)\n\n\ndef test_merge_and_export_model(tmp_path, adapter_path):\n \"\"\"Verify merge_and_export_model produces export artifacts.\"\"\"\n export_dir = tmp_path / \"export\"\n\n args_dict = {\n \"model\": TINY_MODEL,\n \"peft_config\": {\n \"name\": \"lora\",\n 
\"adapter_name_or_path\": adapter_path,\n \"export_dir\": str(export_dir),\n \"export_size\": 1,\n \"infer_dtype\": \"float16\",\n },\n }\n\n merge_and_export_model(args_dict)\n\n assert export_dir.exists()\n assert (export_dir / \"config.json\").exists()\n assert (export_dir / \"model.safetensors\").exists()\n assert (export_dir / \"tokenizer_config.json\").exists()\n", "framework": "pytest", "test_command": "pytest tests_v1/plugins/model_plugins/test_peft.py::test_load_adapter_infer_multiple_merges -xvs"}] | {"repo_url": "https://github.com/hiyouga/LlamaFactory", "install_cmd": "pip install -e .", "commit_sha": "c0245c43fc1fbb87ed6b2f2d28bdcceed5103946", "frozen_requirements": "frozen_requirements/hiyouga_LlamaFactory.txt"} | {"body_lines": 29, "file_lines": 344, "has_docstring": true, "num_tests": 4} | {"status": "passed", "tests_run": 4} | repo_patch/0035 | file_overlap |
repo_patch/0024 | infiniflow/ragflow | common/string_utils.py | remove_redundant_spaces | remove_redundant_spaces | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
def remove_redundant_spaces(txt: str):
"""
Remove redundant spaces around punctuation marks while preserving meaningful spaces.
This function performs two main operations:
1. Remove spaces after left-boundary characters (opening brackets, etc.)
2. Remove spaces before right-boundary characters (closing brackets, punctuation, etc.)
Args:
txt (str): Input text to process
Returns:
str: Text with redundant spaces removed
"""
# First pass: Remove spaces after left-boundary characters
# Matches: [non-alphanumeric-and-specific-right-punctuation] + [non-space]
# Removes spaces after characters like '(', '<', and other non-alphanumeric chars
# Examples:
# "( test" → "(test"
# TODO: Implement this function
def clean_markdown_block(text):
"""
Remove Markdown code block syntax from the beginning and end of text.
This function cleans Markdown code blocks by removing:
- Opening ```Markdown tags (with optional whitespace and newlines)
- Closing ``` tags (with optional whitespace and newlines)
Args:
text (str): Input text that may be wrapped in Markdown code blocks
Returns:
str: Cleaned text with Markdown code block syntax removed, and stripped of surrounding whitespace
"""
# Remove opening ```Markdown tag with optional whitespace and newlines
# Matches: optional whitespace + ```markdown + optional whitespace + optional newline
text = re.sub(r'^\s*```markdown\s*\n?', '', text)
# Remove closing ``` tag with optional whitespace and newlines
# Matches: optional newline + optional whitespace + ``` + optional whitespace at end
text = re.sub(r'\n?\s*```\s*$', '', text)
# Return text with surrounding whitespace removed
return text.strip() | def remove_redundant_spaces(txt: str):
"""
Remove redundant spaces around punctuation marks while preserving meaningful spaces.
This function performs two main operations:
1. Remove spaces after left-boundary characters (opening brackets, etc.)
2. Remove spaces before right-boundary characters (closing brackets, punctuation, etc.)
Args:
txt (str): Input text to process
Returns:
str: Text with redundant spaces removed
"""
# First pass: Remove spaces after left-boundary characters
# Matches: [non-alphanumeric-and-specific-right-punctuation] + [non-space]
# Removes spaces after characters like '(', '<', and other non-alphanumeric chars
# Examples:
# "( test" → "(test" | Remove redundant spaces around punctuation marks while preserving meaningful spaces.
This function performs two main operations:
1. Remove spaces after left-boundary characters (opening brackets, etc.)
2. Remove spaces before right-boundary characters (closing brackets, punctuation, etc.)
Args:
txt (str): Input text to process
Returns:
str: Text with redundant spaces removed | txt = re.sub(r"([^a-z0-9.,\)>]) +([^ ])", r"\1\2", txt, flags=re.IGNORECASE)
# Second pass: Remove spaces before right-boundary characters
# Matches: [non-space] + [non-alphanumeric-and-specific-left-punctuation]
# Removes spaces before characters like non-')', non-',', non-'.', and non-alphanumeric chars
# Examples:
# "world !" → "world!"
return re.sub(r"([^ ]) +([^a-z0-9.,\(<])", r"\1\2", txt, flags=re.IGNORECASE) | def remove_redundant_spaces(txt: str):
"""
Remove redundant spaces around punctuation marks while preserving meaningful spaces.
This function performs two main operations:
1. Remove spaces after left-boundary characters (opening brackets, etc.)
2. Remove spaces before right-boundary characters (closing brackets, punctuation, etc.)
Args:
txt (str): Input text to process
Returns:
str: Text with redundant spaces removed
"""
# First pass: Remove spaces after left-boundary characters
# Matches: [non-alphanumeric-and-specific-right-punctuation] + [non-space]
# Removes spaces after characters like '(', '<', and other non-alphanumeric chars
# Examples:
# "( test" → "(test"
txt = re.sub(r"([^a-z0-9.,\)>]) +([^ ])", r"\1\2", txt, flags=re.IGNORECASE)
# Second pass: Remove spaces before right-boundary characters
# Matches: [non-space] + [non-alphanumeric-and-specific-left-punctuation]
# Removes spaces before characters like non-')', non-',', non-'.', and non-alphanumeric chars
# Examples:
# "world !" → "world!"
return re.sub(r"([^ ]) +([^a-z0-9.,\(<])", r"\1\2", txt, flags=re.IGNORECASE) | [{"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_remove_spaces_before_commas", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert 
remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 
14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! 
This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_remove_spaces_before_commas -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_remove_spaces_before_periods", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_remove_spaces_before_periods -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_remove_spaces_before_exclamation", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_remove_spaces_before_exclamation -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_remove_spaces_after_opening_parenthesis", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_remove_spaces_after_opening_parenthesis -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_remove_spaces_before_closing_parenthesis", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_remove_spaces_before_closing_parenthesis -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_keep_spaces_between_words", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_keep_spaces_between_words -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_mixed_punctuation", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_mixed_punctuation -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_with_numbers", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_with_numbers -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_decimal_numbers", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_decimal_numbers -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_time_format", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_time_format -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_currency_symbols", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_currency_symbols -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_empty_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_empty_string -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_only_spaces", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_only_spaces -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_no_redundant_spaces", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_no_redundant_spaces -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_multiple_spaces", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_multiple_spaces -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_angle_brackets", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_angle_brackets -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_case_insensitive", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_case_insensitive -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_semicolon_and_colon", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_semicolon_and_colon -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_quotation_marks", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_quotation_marks -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_abbreviations", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_abbreviations -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_multiple_punctuation", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_multiple_punctuation -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_email_addresses", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_email_addresses -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_urls", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_urls -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_hashtags_and_mentions", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_hashtags_and_mentions -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_nested_parentheses", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_nested_parentheses -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_math_expressions", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_math_expressions -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_html_tags", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_html_tags -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_programming_code", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_programming_code -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_unicode_and_special_symbols", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_unicode_and_special_symbols -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_mixed_chinese_english", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_mixed_chinese_english -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_special_characters_in_pattern", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_special_characters_in_pattern -xvs"}, {"test_file": "test/unit_test/common/test_string_utils.py", "test_function": "TestRemoveRedundantSpaces.test_tabs_and_newlines", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nfrom common.string_utils import remove_redundant_spaces, clean_markdown_block\n\n\nclass TestRemoveRedundantSpaces:\n\n # Basic punctuation tests\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_commas(self):\n \"\"\"Test removing spaces before commas\"\"\"\n input_text = \"Hello , world\"\n expected = \"Hello, world\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_remove_spaces_before_periods(self):\n \"\"\"Test removing spaces before periods\"\"\"\n input_text = \"This is a test .\"\n expected = \"This is a test.\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_exclamation(self):\n \"\"\"Test removing spaces before exclamation marks\"\"\"\n input_text = \"Amazing !\"\n expected = \"Amazing!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_after_opening_parenthesis(self):\n \"\"\"Test removing spaces after opening parenthesis\"\"\"\n input_text = \"This is ( test)\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_remove_spaces_before_closing_parenthesis(self):\n \"\"\"Test removing spaces before closing parenthesis\"\"\"\n input_text = \"This is (test )\"\n expected = \"This is (test)\"\n assert remove_redundant_spaces(input_text) == expected\n\n def 
test_keep_spaces_between_words(self):\n \"\"\"Test preserving normal spaces between words\"\"\"\n input_text = \"This should remain unchanged\"\n expected = \"This should remain unchanged\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_punctuation(self):\n \"\"\"Test mixed punctuation scenarios\"\"\"\n input_text = \"Hello , world ! This is ( test ) .\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Numbers and special formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_with_numbers(self):\n \"\"\"Test handling of numbers\"\"\"\n input_text = \"I have 100 , 000 dollars .\"\n expected = \"I have 100, 000 dollars.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_decimal_numbers(self):\n \"\"\"Test decimal numbers\"\"\"\n input_text = \"The value is 3 . 14 .\"\n expected = \"The value is 3.14.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_time_format(self):\n \"\"\"Test time format handling\"\"\"\n input_text = \"Time is 12 : 30 PM .\"\n expected = \"Time is 12:30 PM.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_currency_symbols(self):\n \"\"\"Test currency symbols\"\"\"\n input_text = \"Price : € 100 , £ 50 , ¥ 1000 .\"\n expected = \"Price: €100, £50, ¥1000.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Edge cases and special characters\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n assert remove_redundant_spaces(\"\") == \"\"\n\n def test_only_spaces(self):\n \"\"\"Test input with only spaces\"\"\"\n input_text = \" \"\n expected = \" \"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_no_redundant_spaces(self):\n \"\"\"Test text without 
redundant spaces\"\"\"\n input_text = \"Hello, world! This is (test).\"\n expected = \"Hello, world! This is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_spaces(self):\n \"\"\"Test multiple consecutive spaces\"\"\"\n input_text = \"Hello , world !\"\n expected = \"Hello, world!\"\n assert remove_redundant_spaces(input_text) == expected\n\n def test_angle_brackets(self):\n \"\"\"Test angle brackets handling\"\"\"\n input_text = \"This is < test >\"\n expected = \"This is <test>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_case_insensitive(self):\n \"\"\"Test case insensitivity\"\"\"\n input_text = \"HELLO , World !\"\n expected = \"HELLO, World!\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Additional punctuation marks\n @pytest.mark.skip(reason=\"Failed\")\n def test_semicolon_and_colon(self):\n \"\"\"Test semicolon and colon handling\"\"\"\n input_text = \"Items : apple ; banana ; orange .\"\n expected = \"Items: apple; banana; orange.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_quotation_marks(self):\n \"\"\"Test quotation marks handling\"\"\"\n input_text = 'He said , \" Hello \" .'\n expected = 'He said, \"Hello\".'\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_abbreviations(self):\n \"\"\"Test abbreviations\"\"\"\n input_text = \"Dr . Smith and Mr . Jones .\"\n expected = \"Dr. Smith and Mr. Jones.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_multiple_punctuation(self):\n \"\"\"Test multiple consecutive punctuation marks\"\"\"\n input_text = \"Wow !! ... Really ??\"\n expected = \"Wow!! ... 
Really??\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Special text formats\n @pytest.mark.skip(reason=\"Failed\")\n def test_email_addresses(self):\n \"\"\"Test email addresses (should not be modified ideally)\"\"\"\n input_text = \"Contact me at test @ example . com .\"\n expected = \"Contact me at test@example.com.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_urls(self):\n \"\"\"Test URLs (might be modified by current function)\"\"\"\n input_text = \"Visit https : //example.com / path .\"\n expected = \"Visit https://example.com/path.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_hashtags_and_mentions(self):\n \"\"\"Test hashtags and mentions\"\"\"\n input_text = \"Check out # topic and @ user .\"\n expected = \"Check out #topic and @user.\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Complex structures\n @pytest.mark.skip(reason=\"Failed\")\n def test_nested_parentheses(self):\n \"\"\"Test nested parentheses\"\"\"\n input_text = \"Outer ( inner ( deep ) ) .\"\n expected = \"Outer (inner (deep)).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_math_expressions(self):\n \"\"\"Test mathematical expressions\"\"\"\n input_text = \"Calculate 2 + 2 = 4 .\"\n expected = \"Calculate 2 + 2 = 4.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_html_tags(self):\n \"\"\"Test HTML tags\"\"\"\n input_text = \"< p > This is a paragraph . < / p >\"\n expected = \"<p> This is a paragraph. 
</p>\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_programming_code(self):\n \"\"\"Test programming code snippets\"\"\"\n input_text = \"Code : if ( x > 0 ) { print ( 'hello' ) ; }\"\n expected = \"Code: if (x > 0) {print ('hello');}\"\n assert remove_redundant_spaces(input_text) == expected\n\n # Unicode and special symbols\n @pytest.mark.skip(reason=\"Failed\")\n def test_unicode_and_special_symbols(self):\n \"\"\"Test Unicode characters and special symbols\"\"\"\n input_text = \"Copyright © 2023 , All rights reserved .\"\n expected = \"Copyright © 2023, All rights reserved.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_mixed_chinese_english(self):\n \"\"\"Test mixed Chinese and English text\"\"\"\n input_text = \"你好 , world ! 这是 ( 测试 ) .\"\n expected = \"你好, world! 这是 (测试).\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_special_characters_in_pattern(self):\n \"\"\"Test special characters in the pattern\"\"\"\n input_text = \"Price is $ 100 . 
00 , tax included .\"\n expected = \"Price is $100.00, tax included.\"\n assert remove_redundant_spaces(input_text) == expected\n\n @pytest.mark.skip(reason=\"Failed\")\n def test_tabs_and_newlines(self):\n \"\"\"Test tabs and newlines handling\"\"\"\n input_text = \"Hello ,\\tworld !\\nThis is ( test ) .\"\n expected = \"Hello,\\tworld!\\nThis is (test).\"\n assert remove_redundant_spaces(input_text) == expected\n\n\nclass TestCleanMarkdownBlock:\n\n def test_standard_markdown_block(self):\n \"\"\"Test standard Markdown code block syntax\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_with_whitespace_variations(self):\n \"\"\"Test markdown blocks with various whitespace patterns\"\"\"\n input_text = \" ```markdown \\n Content here \\n ``` \"\n expected = \"Content here\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiline_content(self):\n \"\"\"Test markdown blocks with multiple lines of content\"\"\"\n input_text = \"```markdown\\nLine 1\\nLine 2\\nLine 3\\n```\"\n expected = \"Line 1\\nLine 2\\nLine 3\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_opening_newline(self):\n \"\"\"Test markdown block without newline after opening tag\"\"\"\n input_text = \"```markdownHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_no_closing_newline(self):\n \"\"\"Test markdown block without newline before closing tag\"\"\"\n input_text = \"```markdown\\nHello world```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_markdown_block(self):\n \"\"\"Test empty Markdown code block\"\"\"\n input_text = \"```markdown\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_whitespace_content(self):\n \"\"\"Test markdown block containing only whitespace\"\"\"\n input_text = 
\"```markdown\\n \\n\\t\\n\\n```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_plain_text_without_markdown(self):\n \"\"\"Test text that doesn't contain markdown block syntax\"\"\"\n input_text = \"This is plain text without any code blocks\"\n expected = \"This is plain text without any code blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_partial_markdown_syntax(self):\n \"\"\"Test text with only opening or closing tags\"\"\"\n input_text = \"```markdown\\nUnclosed block\"\n expected = \"Unclosed block\"\n assert clean_markdown_block(input_text) == expected\n\n input_text = \"Unopened block\\n```\"\n expected = \"Unopened block\"\n assert clean_markdown_block(input_text) == expected\n\n def test_mixed_whitespace_characters(self):\n \"\"\"Test with tabs, spaces, and mixed whitespace\"\"\"\n input_text = \"\\t```markdown\\t\\n\\tContent with tabs\\n\\t```\\t\"\n expected = \"Content with tabs\"\n assert clean_markdown_block(input_text) == expected\n\n def test_preserves_internal_whitespace(self):\n \"\"\"Test that internal whitespace is preserved\"\"\"\n input_text = \"```markdown\\n Preserve internal \\n whitespace \\n```\"\n expected = \"Preserve internal \\n whitespace\"\n assert clean_markdown_block(input_text) == expected\n\n def test_special_characters_content(self):\n \"\"\"Test markdown block with special characters\"\"\"\n input_text = \"```markdown\\n# Header\\n**Bold** and *italic*\\n```\"\n expected = \"# Header\\n**Bold** and *italic*\"\n assert clean_markdown_block(input_text) == expected\n\n def test_empty_string(self):\n \"\"\"Test empty string input\"\"\"\n input_text = \"\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def test_only_markdown_tags(self):\n \"\"\"Test input containing only Markdown tags\"\"\"\n input_text = \"```markdown```\"\n expected = \"\"\n assert clean_markdown_block(input_text) == expected\n\n def 
test_windows_line_endings(self):\n \"\"\"Test markdown block with Windows line endings\"\"\"\n input_text = \"```markdown\\r\\nHello world\\r\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_unix_line_endings(self):\n \"\"\"Test markdown block with Unix line endings\"\"\"\n input_text = \"```markdown\\nHello world\\n```\"\n expected = \"Hello world\"\n assert clean_markdown_block(input_text) == expected\n\n def test_nested_code_blocks_preserved(self):\n \"\"\"Test that nested code blocks within content are preserved\"\"\"\n input_text = \"```markdown\\nText with ```nested``` blocks\\n```\"\n expected = \"Text with ```nested``` blocks\"\n assert clean_markdown_block(input_text) == expected\n\n def test_multiple_markdown_blocks(self):\n \"\"\"Test behavior with multiple markdown blocks (takes first and last)\"\"\"\n input_text = \"```markdown\\nFirst line\\n```\\n```markdown\\nSecond line\\n```\"\n expected = \"First line\\n```\\n```markdown\\nSecond line\"\n assert clean_markdown_block(input_text) == expected\n\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_string_utils.py::TestRemoveRedundantSpaces::test_tabs_and_newlines -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 7, "file_lines": 74, "has_docstring": true, "num_tests": 32} | {"status": "passed", "tests_run": 32} | repo_patch/0038 | file_overlap |
repo_patch/0025 | infiniflow/ragflow | rag/utils/raptor_utils.py | get_skip_reason | get_skip_reason | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions for Raptor processing decisions.
"""
import logging
from typing import Optional
# File extensions for structured data types
EXCEL_EXTENSIONS = {".xls", ".xlsx", ".xlsm", ".xlsb"}
CSV_EXTENSIONS = {".csv", ".tsv"}
STRUCTURED_EXTENSIONS = EXCEL_EXTENSIONS | CSV_EXTENSIONS
def is_structured_file_type(file_type: Optional[str]) -> bool:
"""
Check if a file type is structured data (Excel, CSV, etc.)
Args:
file_type: File extension (e.g., ".xlsx", ".csv")
Returns:
True if file is structured data type
"""
if not file_type:
return False
# Normalize to lowercase and ensure leading dot
file_type = file_type.lower()
if not file_type.startswith("."):
file_type = f".{file_type}"
return file_type in STRUCTURED_EXTENSIONS
def is_tabular_pdf(parser_id: str = "", parser_config: Optional[dict] = None) -> bool:
"""
Check if a PDF is being parsed as tabular data.
Args:
parser_id: Parser ID (e.g., "table", "naive")
parser_config: Parser configuration dict
Returns:
True if PDF is being parsed as tabular data
"""
parser_config = parser_config or {}
# If using table parser, it's tabular
if parser_id and parser_id.lower() == "table":
return True
# Check if html4excel is enabled (Excel-like table parsing)
if parser_config.get("html4excel", False):
return True
return False
def should_skip_raptor(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None,
raptor_config: Optional[dict] = None
) -> bool:
"""
Determine if Raptor should be skipped for a given document.
This function implements the logic to automatically disable Raptor for:
1. Excel files (.xls, .xlsx, .csv, etc.)
2. PDFs with tabular data (using table parser or html4excel)
Args:
file_type: File extension (e.g., ".xlsx", ".pdf")
parser_id: Parser ID being used
parser_config: Parser configuration dict
raptor_config: Raptor configuration dict (can override with auto_disable_for_structured_data)
Returns:
True if Raptor should be skipped, False otherwise
"""
parser_config = parser_config or {}
raptor_config = raptor_config or {}
# Check if auto-disable is explicitly disabled in config
if raptor_config.get("auto_disable_for_structured_data", True) is False:
logging.info("Raptor auto-disable is turned off via configuration")
return False
# Check for Excel/CSV files
if is_structured_file_type(file_type):
logging.info(f"Skipping Raptor for structured file type: {file_type}")
return True
# Check for tabular PDFs
if file_type and file_type.lower() in [".pdf", "pdf"]:
if is_tabular_pdf(parser_id, parser_config):
logging.info(f"Skipping Raptor for tabular PDF (parser_id={parser_id})")
return True
return False
def get_skip_reason(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None
) -> str:
"""
Get a human-readable reason why Raptor was skipped.
Args:
file_type: File extension
parser_id: Parser ID being used
parser_config: Parser configuration dict
Returns:
Reason string, or empty string if Raptor should not be skipped
"""
# TODO: Implement this function | def get_skip_reason(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None
) -> str:
"""
Get a human-readable reason why Raptor was skipped.
Args:
file_type: File extension
parser_id: Parser ID being used
parser_config: Parser configuration dict
Returns:
Reason string, or empty string if Raptor should not be skipped
""" | Get a human-readable reason why Raptor was skipped.
Args:
file_type: File extension
parser_id: Parser ID being used
parser_config: Parser configuration dict
Returns:
Reason string, or empty string if Raptor should not be skipped | parser_config = parser_config or {}
if is_structured_file_type(file_type):
return f"Structured data file ({file_type}) - Raptor auto-disabled"
if file_type and file_type.lower() in [".pdf", "pdf"]:
if is_tabular_pdf(parser_id, parser_config):
return f"Tabular PDF (parser={parser_id}) - Raptor auto-disabled"
return "" | def get_skip_reason(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None
) -> str:
"""
Get a human-readable reason why Raptor was skipped.
Args:
file_type: File extension
parser_id: Parser ID being used
parser_config: Parser configuration dict
Returns:
Reason string, or empty string if Raptor should not be skipped
"""
parser_config = parser_config or {}
if is_structured_file_type(file_type):
return f"Structured data file ({file_type}) - Raptor auto-disabled"
if file_type and file_type.lower() in [".pdf", "pdf"]:
if is_tabular_pdf(parser_id, parser_config):
return f"Tabular PDF (parser={parser_id}) - Raptor auto-disabled"
return "" | [{"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestGetSkipReason.test_excel_skip_reason", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" 
in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def 
test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n 
\"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def 
test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = 
should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestGetSkipReason::test_excel_skip_reason -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestGetSkipReason.test_csv_skip_reason", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # 
Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip 
logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n 
assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def 
test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n 
reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestGetSkipReason::test_csv_skip_reason -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestGetSkipReason.test_tabular_pdf_skip_reason", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert 
EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def 
test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n 
\"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n 
\n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel 
processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestGetSkipReason::test_tabular_pdf_skip_reason -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestGetSkipReason.test_html4excel_skip_reason", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n 
\"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert 
should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = 
{\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert 
is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file 
types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestGetSkipReason::test_html4excel_skip_reason -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestGetSkipReason.test_no_skip_reason_for_regular_files", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert 
EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def 
test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n 
\"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n 
\n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel 
processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestGetSkipReason::test_no_skip_reason_for_regular_files -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestEdgeCases.test_none_values", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test 
detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") 
is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n 
assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert 
is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", 
{}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestEdgeCases::test_none_values -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestEdgeCases.test_empty_strings", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert 
EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def 
test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n 
\"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n 
\n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel 
processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestEdgeCases::test_empty_strings -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestIntegrationScenarios.test_financial_excel_report", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test 
detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") 
is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n 
assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert 
is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", 
{}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestIntegrationScenarios::test_financial_excel_report -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestIntegrationScenarios.test_scientific_csv_data", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert 
EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def 
test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n 
\"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n 
\n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel 
processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestIntegrationScenarios::test_scientific_csv_data -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestIntegrationScenarios.test_legal_contract_with_tables", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, 
expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert 
should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = 
{\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert 
is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file 
types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestIntegrationScenarios::test_legal_contract_with_tables -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestIntegrationScenarios.test_text_heavy_pdf_document", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert 
EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def 
test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n 
\"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n 
\n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel 
processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestIntegrationScenarios::test_text_heavy_pdf_document -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 7, "file_lines": 145, "has_docstring": true, "num_tests": 11} | {"status": "passed", "tests_run": 11} | repo_patch/0039 | file_overlap |
repo_patch/0026 | infiniflow/ragflow | common/file_utils.py | get_project_base_directory | get_project_base_directory | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
def get_project_base_directory(*args):
# TODO: Implement this function
def traversal_files(base):
for root, ds, fs in os.walk(base):
for f in fs:
fullname = os.path.join(root, f)
yield fullname | def get_project_base_directory(*args): | global PROJECT_BASE
if PROJECT_BASE is None:
PROJECT_BASE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
)
)
if args:
return os.path.join(PROJECT_BASE, *args)
return PROJECT_BASE | def get_project_base_directory(*args):
global PROJECT_BASE
if PROJECT_BASE is None:
PROJECT_BASE = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
)
)
if args:
return os.path.join(PROJECT_BASE, *args)
return PROJECT_BASE | [{"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_returns_project_base_when_no_args", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert 
\"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = \"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n 
assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", \"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_returns_project_base_when_no_args -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_returns_path_with_single_argument", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = 
\"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", 
\"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_returns_path_with_single_argument -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_returns_path_with_multiple_arguments", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute 
path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = \"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def 
test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", \"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_returns_path_with_multiple_arguments -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_uses_environment_variable_when_available", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = 
\"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", 
\"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_uses_environment_variable_when_available -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_calculates_default_path_when_no_env_vars", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return 
absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = \"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == 
cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", \"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_calculates_default_path_when_no_env_vars -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_caches_project_base_value", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = 
\"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", 
\"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_caches_project_base_value -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_path_components_joined_correctly", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def 
test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = \"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def 
test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", \"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_path_components_joined_correctly -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "TestGetProjectBaseDirectory.test_handles_empty_string_arguments", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = 
\"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", 
\"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::TestGetProjectBaseDirectory::test_handles_empty_string_arguments -xvs"}, {"test_file": "test/unit_test/common/test_file_utils.py", "test_function": "test_various_path_combinations", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport pytest\nfrom unittest.mock import patch\nfrom common import file_utils\nfrom common.file_utils import get_project_base_directory\n\n\nclass TestGetProjectBaseDirectory:\n \"\"\"Test cases for get_project_base_directory function\"\"\"\n\n def test_returns_project_base_when_no_args(self):\n \"\"\"Test that function returns project base directory when no arguments provided\"\"\"\n result = get_project_base_directory()\n\n assert result is not None\n assert isinstance(result, str)\n assert os.path.isabs(result) # Should return absolute path\n\n def 
test_returns_path_with_single_argument(self):\n \"\"\"Test that function joins project base with single additional path component\"\"\"\n result = get_project_base_directory(\"subfolder\")\n\n assert result is not None\n assert \"subfolder\" in result\n assert result.endswith(\"subfolder\")\n\n def test_returns_path_with_multiple_arguments(self):\n \"\"\"Test that function joins project base with multiple path components\"\"\"\n result = get_project_base_directory(\"folder1\", \"folder2\", \"file.txt\")\n\n assert result is not None\n assert \"folder1\" in result\n assert \"folder2\" in result\n assert \"file.txt\" in result\n assert os.path.basename(result) == \"file.txt\"\n\n def test_uses_environment_variable_when_available(self):\n \"\"\"Test that function uses RAG_PROJECT_BASE environment variable when set\"\"\"\n test_path = \"/custom/project/path\"\n\n file_utils.PROJECT_BASE = test_path\n\n result = get_project_base_directory()\n assert result == test_path\n\n def test_calculates_default_path_when_no_env_vars(self):\n \"\"\"Test that function calculates default path when no environment variables are set\"\"\"\n with patch.dict(os.environ, {}, clear=True): # Clear all environment variables\n # Reset the global variable to force re-initialization\n\n result = get_project_base_directory()\n\n # Should return a valid absolute path\n assert result is not None\n assert os.path.isabs(result)\n assert os.path.basename(result) != \"\" # Should not be root directory\n\n def test_caches_project_base_value(self):\n \"\"\"Test that PROJECT_BASE is cached after first calculation\"\"\"\n # Reset the global variable\n\n # First call should calculate the value\n first_result = get_project_base_directory()\n\n # Store the current value\n cached_value = file_utils.PROJECT_BASE\n\n # Second call should use cached value\n second_result = get_project_base_directory()\n\n assert first_result == second_result\n assert file_utils.PROJECT_BASE == cached_value\n\n def 
test_path_components_joined_correctly(self):\n \"\"\"Test that path components are properly joined with the base directory\"\"\"\n base_path = get_project_base_directory()\n expected_path = os.path.join(base_path, \"data\", \"files\", \"document.txt\")\n\n result = get_project_base_directory(\"data\", \"files\", \"document.txt\")\n\n assert result == expected_path\n\n def test_handles_empty_string_arguments(self):\n \"\"\"Test that function handles empty string arguments correctly\"\"\"\n result = get_project_base_directory(\"\")\n\n # Should still return a valid path (base directory)\n assert result is not None\n assert os.path.isabs(result)\n\n\n# Parameterized tests for different path combinations\n@pytest.mark.parametrize(\"path_args,expected_suffix\", [\n ((), \"\"), # No additional arguments\n ((\"src\",), \"src\"),\n ((\"data\", \"models\"), os.path.join(\"data\", \"models\")),\n ((\"config\", \"app\", \"settings.json\"), os.path.join(\"config\", \"app\", \"settings.json\")),\n])\ndef test_various_path_combinations(path_args, expected_suffix):\n \"\"\"Test various combinations of path arguments\"\"\"\n base_path = get_project_base_directory()\n result = get_project_base_directory(*path_args)\n\n if expected_suffix:\n assert result.endswith(expected_suffix)\n else:\n assert result == base_path\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_file_utils.py::test_various_path_combinations -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 11, "file_lines": 40, "has_docstring": false, "num_tests": 9} | {"status": "passed", "tests_run": 9} | repo_patch/0040 | file_overlap | |
repo_patch/0027 | infiniflow/ragflow | common/misc_utils.py | convert_bytes | convert_bytes | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import functools
import hashlib
import logging
import os
import subprocess
import sys
import threading
import uuid
from concurrent.futures import ThreadPoolExecutor
import requests
def get_uuid():
    """Return a fresh time-based (version 1) UUID as a 32-char lowercase hex string."""
    generated = uuid.uuid1()
    return generated.hex
def download_img(url):
    """Fetch an image URL and return it as a base64 data URI (empty string for no URL)."""
    if not url:
        return ""
    response = requests.get(url)
    # Fall back to image/jpg when the server does not declare a content type.
    content_type = response.headers.get('Content-Type', 'image/jpg')
    encoded = base64.b64encode(response.content).decode("utf-8")
    return "data:" + content_type + ";" + "base64," + encoded
def hash_str2int(line: str, mod: int = 10 ** 8) -> int:
    """Map a string to a stable non-negative integer in [0, mod) via SHA-1."""
    digest = hashlib.sha1(line.encode("utf-8")).hexdigest()
    return int(digest, 16) % mod
def convert_bytes(size_in_bytes: int) -> str:
    """Format a byte count as a human-readable size string.

    Scales the value through B/KB/MB/GB/TB/PB in steps of 1024 and picks
    the decimal precision from the magnitude so the output keeps roughly
    three significant digits: plain bytes or values >= 100 get no decimals,
    values >= 10 get one decimal, smaller values get two decimals.

    Args:
        size_in_bytes: Non-negative size in bytes.

    Returns:
        A string such as "0 B", "1023 B", "1.00 KB", "15.0 MB" or "100 KB".
    """
    if size_in_bytes == 0:
        return "0 B"
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    i = 0
    size = float(size_in_bytes)
    # Divide down until the value fits the current unit, capped at the last unit (PB).
    while size >= 1024 and i < len(units) - 1:
        size /= 1024
        i += 1
    # Precision shrinks as the number grows; whole bytes are never fractional.
    if i == 0 or size >= 100:
        return f"{size:.0f} {units[i]}"
    elif size >= 10:
        return f"{size:.1f} {units[i]}"
    else:
        return f"{size:.2f} {units[i]}"
def once(func):
    """Thread-safe run-exactly-once decorator.

    The first call executes `func` and caches its return value; every later
    call returns that cached value without re-executing. A lock serializes
    the first execution so concurrent callers cannot race.

    Args:
        func (callable): The function to execute at most once.

    Returns:
        callable: A wrapper that runs `func` on the first call and serves
        the cached result afterwards.

    Example:
        @once
        def compute_expensive_value():
            print("Computing...")
            return 42
    """
    state = {"done": False, "value": None}
    guard = threading.Lock()

    def wrapper(*args, **kwargs):
        with guard:
            if not state["done"]:
                # Mark as done before invoking, matching first-call-wins
                # semantics even if `func` raises.
                state["done"] = True
                state["value"] = func(*args, **kwargs)
        return state["value"]

    return wrapper
@once
def pip_install_torch():
    """Install pytorch via pip, at most once per process; skipped on CPU-only setups."""
    if os.getenv("DEVICE", "cpu") == "cpu":
        return
    logging.info("Installing pytorch")
    packages = ["torch>=2.5.0,<3.0.0"]
    subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])
@once
def _thread_pool_executor():
    """Build the process-wide thread pool (once); size comes from THREAD_POOL_MAX_WORKERS."""
    raw = os.getenv("THREAD_POOL_MAX_WORKERS", "128")
    try:
        workers = int(raw)
    except ValueError:
        # Non-numeric env value: fall back to the default pool size.
        workers = 128
    if workers < 1:
        workers = 1
    return ThreadPoolExecutor(max_workers=workers)
async def thread_pool_exec(func, *args, **kwargs):
    """Run a blocking callable on the shared thread pool without blocking the event loop."""
    loop = asyncio.get_running_loop()
    if kwargs:
        # run_in_executor cannot forward keyword arguments, so bind everything
        # into a partial first.
        bound = functools.partial(func, *args, **kwargs)
        return await loop.run_in_executor(_thread_pool_executor(), bound)
    return await loop.run_in_executor(_thread_pool_executor(), func, *args)
"""
Format size in bytes.
""" | Format size in bytes. | if size_in_bytes == 0:
return "0 B"
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
i = 0
size = float(size_in_bytes)
while size >= 1024 and i < len(units) - 1:
size /= 1024
i += 1
if i == 0 or size >= 100:
return f"{size:.0f} {units[i]}"
elif size >= 10:
return f"{size:.1f} {units[i]}"
else:
return f"{size:.2f} {units[i]}" | def convert_bytes(size_in_bytes: int) -> str:
"""
Format size in bytes.
"""
if size_in_bytes == 0:
return "0 B"
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
i = 0
size = float(size_in_bytes)
while size >= 1024 and i < len(units) - 1:
size /= 1024
i += 1
if i == 0 or size >= 100:
return f"{size:.0f} {units[i]}"
elif size >= 10:
return f"{size:.1f} {units[i]}"
else:
return f"{size:.2f} {units[i]}" | [{"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_zero_bytes", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex 
string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod 
value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def 
test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric 
strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal 
places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_zero_bytes -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_single_byte", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_single_byte -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_kilobyte_range", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_kilobyte_range -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_megabyte_range", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_megabyte_range -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_gigabyte_range", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_gigabyte_range -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_terabyte_range", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_terabyte_range -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_petabyte_range", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_petabyte_range -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_boundary_values", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_boundary_values -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_precision_transitions", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_precision_transitions -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestConvertBytes.test_large_values_no_overflow", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestConvertBytes::test_large_values_no_overflow -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 14, "file_lines": 134, "has_docstring": true, "num_tests": 10} | {"status": "passed", "tests_run": 10} | repo_patch/0042 | file_overlap |
repo_patch/0028 | infiniflow/ragflow | common/misc_utils.py | download_img | download_img | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import base64
import functools
import hashlib
import logging
import os
import subprocess
import sys
import threading
import uuid
from concurrent.futures import ThreadPoolExecutor
import requests
def get_uuid():
    """Return a freshly generated UUID (version 1) as a 32-character hex string."""
    new_id = uuid.uuid1()
    return new_id.hex
def download_img(url):
    """Download an image and return it as a base64 ``data:`` URI string.

    Args:
        url: The image URL. Falsy values ("" or None) short-circuit to "".

    Returns:
        A ``data:<content-type>;base64,<payload>`` string, or "" when *url*
        is empty/None. Falls back to ``image/jpg`` when the response carries
        no Content-Type header.
    """
    if not url:
        return ""
    response = requests.get(url)
    content_type = response.headers.get('Content-Type', 'image/jpg')
    payload = base64.b64encode(response.content).decode("utf-8")
    return "data:" + content_type + ";" + "base64," + payload
def hash_str2int(line: str, mod: int = 10 ** 8) -> int:
    """Deterministically map *line* to a non-negative int in ``[0, mod)`` via SHA-1."""
    digest = hashlib.sha1(line.encode("utf-8")).hexdigest()
    return int(digest, 16) % mod
def convert_bytes(size_in_bytes: int) -> str:
    """
    Format size in bytes as a human-readable string (B through PB).
    """
    if size_in_bytes == 0:
        return "0 B"
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    value = float(size_in_bytes)
    idx = 0
    # Scale down by 1024 until the value fits the current unit (capped at PB).
    while value >= 1024 and idx + 1 < len(units):
        value /= 1024
        idx += 1
    # Pick precision so roughly three significant digits are shown.
    if idx == 0 or value >= 100:
        precision = 0
    elif value >= 10:
        precision = 1
    else:
        precision = 2
    return f"{value:.{precision}f} {units[idx]}"
def once(func):
    """
    A thread-safe decorator that ensures the decorated function runs exactly once,
    caching and returning its result for all subsequent calls. A lock protects
    the execution state against races in multi-thread environments.

    Fixes over the naive version:
    - ``functools.wraps`` preserves the wrapped function's metadata
      (``__name__``, ``__doc__``), which the original wrapper lost.
    - ``executed`` is flipped only after ``func`` returns successfully, so a
      first call that raises is retried on the next call instead of silently
      caching ``None`` forever.

    Args:
        func (callable): The function to be executed only once.

    Returns:
        callable: A wrapper that executes `func` on the first successful call
        and returns the cached result thereafter.
    """
    executed = False
    result = None
    lock = threading.Lock()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        nonlocal executed, result
        with lock:
            if not executed:
                result = func(*args, **kwargs)
                # Only mark as done after a successful run (see docstring).
                executed = True
        return result

    return wrapper
@once
def pip_install_torch():
    """Install PyTorch (once per process); a no-op when DEVICE is "cpu"."""
    if os.getenv("DEVICE", "cpu") == "cpu":
        return
    logging.info("Installing pytorch")
    packages = ["torch>=2.5.0,<3.0.0"]
    subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])
@once
def _thread_pool_executor():
    """Build the shared ThreadPoolExecutor, sized via THREAD_POOL_MAX_WORKERS (default 128, min 1)."""
    raw = os.getenv("THREAD_POOL_MAX_WORKERS", "128")
    try:
        workers = int(raw)
    except ValueError:
        workers = 128
    # Guard against nonsensical (zero/negative) configuration values.
    if workers < 1:
        workers = 1
    return ThreadPoolExecutor(max_workers=workers)
async def thread_pool_exec(func, *args, **kwargs):
    """Run a blocking callable on the shared thread pool from async code.

    Keyword arguments are bound with functools.partial because
    loop.run_in_executor only forwards positional arguments.
    """
    loop = asyncio.get_running_loop()
    if kwargs:
        func = functools.partial(func, *args, **kwargs)
        return await loop.run_in_executor(_thread_pool_executor(), func)
return await loop.run_in_executor(_thread_pool_executor(), func, *args) | def download_img(url): | if not url:
return ""
response = requests.get(url)
return "data:" + \
response.headers.get('Content-Type', 'image/jpg') + ";" + \
"base64," + base64.b64encode(response.content).decode("utf-8") | def download_img(url):
if not url:
return ""
response = requests.get(url)
return "data:" + \
response.headers.get('Content-Type', 'image/jpg') + ";" + \
"base64," + base64.b64encode(response.content).decode("utf-8") | [{"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestDownloadImg.test_empty_url_returns_empty_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def 
test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def 
test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any 
string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def 
test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 
GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestDownloadImg::test_empty_url_returns_empty_string -xvs"}, {"test_file": "test/unit_test/common/test_misc_utils.py", "test_function": "TestDownloadImg.test_none_url_returns_empty_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport uuid\nimport hashlib\nfrom common.misc_utils import get_uuid, download_img, hash_str2int, convert_bytes\n\n\nclass TestGetUuid:\n \"\"\"Test cases for get_uuid function\"\"\"\n\n def test_returns_string(self):\n \"\"\"Test that function returns a string\"\"\"\n result = get_uuid()\n assert isinstance(result, str)\n\n def test_hex_format(self):\n \"\"\"Test that returned string is in hex format\"\"\"\n result = get_uuid()\n # UUID v1 hex should be 32 characters (without dashes)\n assert len(result) == 32\n # Should only contain hexadecimal characters\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_no_dashes_in_result(self):\n \"\"\"Test that result contains no dashes\"\"\"\n result = get_uuid()\n assert '-' not in result\n\n def test_unique_results(self):\n \"\"\"Test that multiple calls return different UUIDs\"\"\"\n results = [get_uuid() for _ in range(10)]\n\n # All results should be unique\n assert len(results) == len(set(results))\n\n # All should be valid hex strings of correct length\n for result in results:\n assert len(result) == 32\n assert all(c in '0123456789abcdef' for c in result)\n\n def test_valid_uuid_structure(self):\n \"\"\"Test that the hex string can be converted back to UUID\"\"\"\n result = get_uuid()\n\n # Should be able to create UUID from the hex string\n reconstructed_uuid = uuid.UUID(hex=result)\n assert isinstance(reconstructed_uuid, 
uuid.UUID)\n\n # The hex representation should match the original\n assert reconstructed_uuid.hex == result\n\n def test_uuid1_specific_characteristics(self):\n \"\"\"Test that UUID v1 characteristics are present\"\"\"\n result = get_uuid()\n uuid_obj = uuid.UUID(hex=result)\n\n # UUID v1 should have version 1\n assert uuid_obj.version == 1\n\n # Variant should be RFC 4122\n assert uuid_obj.variant == 'specified in RFC 4122'\n\n def test_result_length_consistency(self):\n \"\"\"Test that all generated UUIDs have consistent length\"\"\"\n for _ in range(100):\n result = get_uuid()\n assert len(result) == 32\n\n def test_hex_characters_only(self):\n \"\"\"Test that only valid hex characters are used\"\"\"\n for _ in range(100):\n result = get_uuid()\n # Should only contain lowercase hex characters (UUID hex is lowercase)\n assert result.islower()\n assert all(c in '0123456789abcdef' for c in result)\n\n\nclass TestDownloadImg:\n \"\"\"Test cases for download_img function\"\"\"\n\n def test_empty_url_returns_empty_string(self):\n \"\"\"Test that empty URL returns empty string\"\"\"\n result = download_img(\"\")\n assert result == \"\"\n\n def test_none_url_returns_empty_string(self):\n \"\"\"Test that None URL returns empty string\"\"\"\n result = download_img(None)\n assert result == \"\"\n\n\nclass TestHashStr2Int:\n \"\"\"Test cases for hash_str2int function\"\"\"\n\n def test_basic_hashing(self):\n \"\"\"Test basic string hashing functionality\"\"\"\n result = hash_str2int(\"hello\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_default_mod_value(self):\n \"\"\"Test that default mod value is 10^8\"\"\"\n result = hash_str2int(\"test\")\n assert 0 <= result < 10 ** 8\n\n def test_custom_mod_value(self):\n \"\"\"Test with custom mod value\"\"\"\n result = hash_str2int(\"test\", mod=1000)\n assert isinstance(result, int)\n assert 0 <= result < 1000\n\n def test_same_input_same_output(self):\n \"\"\"Test that same input produces same 
output\"\"\"\n result1 = hash_str2int(\"consistent\")\n result2 = hash_str2int(\"consistent\")\n result3 = hash_str2int(\"consistent\")\n\n assert result1 == result2 == result3\n\n def test_different_input_different_output(self):\n \"\"\"Test that different inputs produce different outputs (usually)\"\"\"\n result1 = hash_str2int(\"hello\")\n result2 = hash_str2int(\"world\")\n result3 = hash_str2int(\"hello world\")\n\n # While hash collisions are possible, they're very unlikely for these inputs\n results = [result1, result2, result3]\n assert len(set(results)) == len(results)\n\n def test_empty_string(self):\n \"\"\"Test hashing empty string\"\"\"\n result = hash_str2int(\"\")\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_unicode_string(self):\n \"\"\"Test hashing unicode strings\"\"\"\n test_strings = [\n \"中文\",\n \"🚀火箭\",\n \"café\",\n \"🎉\",\n \"Hello 世界\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_special_characters(self):\n \"\"\"Test hashing strings with special characters\"\"\"\n test_strings = [\n \"hello@world.com\",\n \"test#123\",\n \"line\\nwith\\nnewlines\",\n \"tab\\tcharacter\",\n \"space in string\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_large_string(self):\n \"\"\"Test hashing large string\"\"\"\n large_string = \"x\" * 10000\n result = hash_str2int(large_string)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n def test_mod_value_1(self):\n \"\"\"Test with mod value 1 (should always return 0)\"\"\"\n result = hash_str2int(\"any string\", mod=1)\n assert result == 0\n\n def test_mod_value_2(self):\n \"\"\"Test with mod value 2 (should return 0 or 1)\"\"\"\n result = hash_str2int(\"test\", mod=2)\n assert result in [0, 1]\n\n def test_very_large_mod(self):\n \"\"\"Test with very large 
mod value\"\"\"\n result = hash_str2int(\"test\", mod=10 ** 12)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 12\n\n def test_hash_algorithm_sha1(self):\n \"\"\"Test that SHA1 algorithm is used\"\"\"\n test_string = \"hello\"\n expected_hash = hashlib.sha1(test_string.encode(\"utf-8\")).hexdigest()\n expected_int = int(expected_hash, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_utf8_encoding(self):\n \"\"\"Test that UTF-8 encoding is used\"\"\"\n # This should work without encoding errors\n result = hash_str2int(\"café 🎉\")\n assert isinstance(result, int)\n\n def test_range_with_different_mods(self):\n \"\"\"Test that result is always in correct range for different mod values\"\"\"\n test_cases = [\n (\"test1\", 100),\n (\"test2\", 1000),\n (\"test3\", 10000),\n (\"test4\", 999999),\n ]\n\n for test_str, mod_val in test_cases:\n result = hash_str2int(test_str, mod=mod_val)\n assert 0 <= result < mod_val\n\n def test_hexdigest_conversion(self):\n \"\"\"Test the hexdigest to integer conversion\"\"\"\n test_string = \"hello\"\n hash_obj = hashlib.sha1(test_string.encode(\"utf-8\"))\n hex_digest = hash_obj.hexdigest()\n expected_int = int(hex_digest, 16) % (10 ** 8)\n\n result = hash_str2int(test_string)\n assert result == expected_int\n\n def test_consistent_with_direct_calculation(self):\n \"\"\"Test that function matches direct hashlib usage\"\"\"\n test_strings = [\"a\", \"b\", \"abc\", \"hello world\", \"12345\"]\n\n for test_str in test_strings:\n direct_result = int(hashlib.sha1(test_str.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 8)\n function_result = hash_str2int(test_str)\n assert function_result == direct_result\n\n def test_numeric_strings(self):\n \"\"\"Test hashing numeric strings\"\"\"\n test_strings = [\"123\", \"0\", \"999999\", \"3.14159\", \"-42\"]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 
8\n\n def test_whitespace_strings(self):\n \"\"\"Test hashing strings with various whitespace\"\"\"\n test_strings = [\n \" leading\",\n \"trailing \",\n \" both \",\n \"\\ttab\",\n \"new\\nline\",\n \"\\r\\nwindows\"\n ]\n\n for test_str in test_strings:\n result = hash_str2int(test_str)\n assert isinstance(result, int)\n assert 0 <= result < 10 ** 8\n\n\nclass TestConvertBytes:\n \"\"\"Test suite for convert_bytes function\"\"\"\n\n def test_zero_bytes(self):\n \"\"\"Test that 0 bytes returns '0 B'\"\"\"\n assert convert_bytes(0) == \"0 B\"\n\n def test_single_byte(self):\n \"\"\"Test single byte values\"\"\"\n assert convert_bytes(1) == \"1 B\"\n assert convert_bytes(999) == \"999 B\"\n\n def test_kilobyte_range(self):\n \"\"\"Test values in kilobyte range with different precisions\"\"\"\n # Exactly 1 KB\n assert convert_bytes(1024) == \"1.00 KB\"\n\n # Values that should show 1 decimal place (10-99.9 range)\n assert convert_bytes(15360) == \"15.0 KB\" # 15 KB exactly\n assert convert_bytes(10752) == \"10.5 KB\" # 10.5 KB\n\n # Values that should show 2 decimal places (1-9.99 range)\n assert convert_bytes(2048) == \"2.00 KB\" # 2 KB exactly\n assert convert_bytes(3072) == \"3.00 KB\" # 3 KB exactly\n assert convert_bytes(5120) == \"5.00 KB\" # 5 KB exactly\n\n def test_megabyte_range(self):\n \"\"\"Test values in megabyte range\"\"\"\n # Exactly 1 MB\n assert convert_bytes(1048576) == \"1.00 MB\"\n\n # Values with different precision requirements\n assert convert_bytes(15728640) == \"15.0 MB\" # 15.0 MB\n assert convert_bytes(11010048) == \"10.5 MB\" # 10.5 MB\n\n def test_gigabyte_range(self):\n \"\"\"Test values in gigabyte range\"\"\"\n # Exactly 1 GB\n assert convert_bytes(1073741824) == \"1.00 GB\"\n\n # Large value that should show 0 decimal places\n assert convert_bytes(3221225472) == \"3.00 GB\" # 3 GB exactly\n\n def test_terabyte_range(self):\n \"\"\"Test values in terabyte range\"\"\"\n assert convert_bytes(1099511627776) == \"1.00 TB\" # 1 TB\n\n def 
test_petabyte_range(self):\n \"\"\"Test values in petabyte range\"\"\"\n assert convert_bytes(1125899906842624) == \"1.00 PB\" # 1 PB\n\n def test_boundary_values(self):\n \"\"\"Test values at unit boundaries\"\"\"\n # Just below 1 KB\n assert convert_bytes(1023) == \"1023 B\"\n\n # Just above 1 KB\n assert convert_bytes(1025) == \"1.00 KB\"\n\n # At 100 KB boundary (should switch to 0 decimal places)\n assert convert_bytes(102400) == \"100 KB\"\n assert convert_bytes(102300) == \"99.9 KB\"\n\n def test_precision_transitions(self):\n \"\"\"Test the precision formatting transitions\"\"\"\n # Test transition from 2 decimal places to 1 decimal place\n assert convert_bytes(9216) == \"9.00 KB\" # 9.00 KB (2 decimal places)\n assert convert_bytes(10240) == \"10.0 KB\" # 10.0 KB (1 decimal place)\n\n # Test transition from 1 decimal place to 0 decimal places\n assert convert_bytes(102400) == \"100 KB\" # 100 KB (0 decimal places)\n\n def test_large_values_no_overflow(self):\n \"\"\"Test that very large values don't cause issues\"\"\"\n # Very large value that should use PB\n large_value = 10 * 1125899906842624 # 10 PB\n assert \"PB\" in convert_bytes(large_value)\n\n # Ensure we don't exceed available units\n huge_value = 100 * 1125899906842624 # 100 PB (still within PB range)\n assert \"PB\" in convert_bytes(huge_value)\n", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_misc_utils.py::TestDownloadImg::test_none_url_returns_empty_string -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 6, "file_lines": 134, "has_docstring": false, "num_tests": 2} | {"status": "passed", "tests_run": 2} | repo_patch/0043 | file_overlap | |
repo_patch/0029 | infiniflow/ragflow | rag/utils/raptor_utils.py | is_structured_file_type | is_structured_file_type | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions for Raptor processing decisions.
"""
import logging
from typing import Optional
# File extensions for structured data types
EXCEL_EXTENSIONS = {".xls", ".xlsx", ".xlsm", ".xlsb"}
CSV_EXTENSIONS = {".csv", ".tsv"}
STRUCTURED_EXTENSIONS = EXCEL_EXTENSIONS | CSV_EXTENSIONS
def is_structured_file_type(file_type: Optional[str]) -> bool:
"""
Check if a file type is structured data (Excel, CSV, etc.)
Args:
file_type: File extension (e.g., ".xlsx", ".csv")
Returns:
True if file is structured data type
"""
# TODO: Implement this function
def is_tabular_pdf(parser_id: str = "", parser_config: Optional[dict] = None) -> bool:
"""
Check if a PDF is being parsed as tabular data.
Args:
parser_id: Parser ID (e.g., "table", "naive")
parser_config: Parser configuration dict
Returns:
True if PDF is being parsed as tabular data
"""
parser_config = parser_config or {}
# If using table parser, it's tabular
if parser_id and parser_id.lower() == "table":
return True
# Check if html4excel is enabled (Excel-like table parsing)
if parser_config.get("html4excel", False):
return True
return False
def should_skip_raptor(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None,
raptor_config: Optional[dict] = None
) -> bool:
"""
Determine if Raptor should be skipped for a given document.
This function implements the logic to automatically disable Raptor for:
1. Excel files (.xls, .xlsx, .csv, etc.)
2. PDFs with tabular data (using table parser or html4excel)
Args:
file_type: File extension (e.g., ".xlsx", ".pdf")
parser_id: Parser ID being used
parser_config: Parser configuration dict
raptor_config: Raptor configuration dict (can override with auto_disable_for_structured_data)
Returns:
True if Raptor should be skipped, False otherwise
"""
parser_config = parser_config or {}
raptor_config = raptor_config or {}
# Check if auto-disable is explicitly disabled in config
if raptor_config.get("auto_disable_for_structured_data", True) is False:
logging.info("Raptor auto-disable is turned off via configuration")
return False
# Check for Excel/CSV files
if is_structured_file_type(file_type):
logging.info(f"Skipping Raptor for structured file type: {file_type}")
return True
# Check for tabular PDFs
if file_type and file_type.lower() in [".pdf", "pdf"]:
if is_tabular_pdf(parser_id, parser_config):
logging.info(f"Skipping Raptor for tabular PDF (parser_id={parser_id})")
return True
return False
def get_skip_reason(
file_type: Optional[str] = None,
parser_id: str = "",
parser_config: Optional[dict] = None
) -> str:
"""
Get a human-readable reason why Raptor was skipped.
Args:
file_type: File extension
parser_id: Parser ID being used
parser_config: Parser configuration dict
Returns:
Reason string, or empty string if Raptor should not be skipped
"""
parser_config = parser_config or {}
if is_structured_file_type(file_type):
return f"Structured data file ({file_type}) - Raptor auto-disabled"
if file_type and file_type.lower() in [".pdf", "pdf"]:
if is_tabular_pdf(parser_id, parser_config):
return f"Tabular PDF (parser={parser_id}) - Raptor auto-disabled"
return "" | def is_structured_file_type(file_type: Optional[str]) -> bool:
"""
Check if a file type is structured data (Excel, CSV, etc.)
Args:
file_type: File extension (e.g., ".xlsx", ".csv")
Returns:
True if file is structured data type
""" | Check if a file type is structured data (Excel, CSV, etc.)
Args:
file_type: File extension (e.g., ".xlsx", ".csv")
Returns:
True if file is structured data type | if not file_type:
return False
# Normalize to lowercase and ensure leading dot
file_type = file_type.lower()
if not file_type.startswith("."):
file_type = f".{file_type}"
return file_type in STRUCTURED_EXTENSIONS | def is_structured_file_type(file_type: Optional[str]) -> bool:
"""
Check if a file type is structured data (Excel, CSV, etc.)
Args:
file_type: File extension (e.g., ".xlsx", ".csv")
Returns:
True if file is structured data type
"""
if not file_type:
return False
# Normalize to lowercase and ensure leading dot
file_type = file_type.lower()
if not file_type.startswith("."):
file_type = f".{file_type}"
return file_type in STRUCTURED_EXTENSIONS | [{"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestIsStructuredFileType.test_file_type_detection", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), # Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are 
properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", 
parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n 
def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration 
scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = 
should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestIsStructuredFileType::test_file_type_detection -xvs"}, {"test_file": "test/unit_test/utils/test_raptor_utils.py", "test_function": "TestEdgeCases.test_case_insensitivity", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for Raptor utility functions.\n\"\"\"\n\nimport pytest\nfrom rag.utils.raptor_utils import (\n is_structured_file_type,\n is_tabular_pdf,\n should_skip_raptor,\n get_skip_reason,\n EXCEL_EXTENSIONS,\n CSV_EXTENSIONS,\n STRUCTURED_EXTENSIONS\n)\n\n\nclass TestIsStructuredFileType:\n \"\"\"Test file type detection for structured data\"\"\"\n\n @pytest.mark.parametrize(\"file_type,expected\", [\n (\".xlsx\", True),\n (\".xls\", True),\n (\".xlsm\", True),\n (\".xlsb\", True),\n (\".csv\", True),\n (\".tsv\", True),\n (\"xlsx\", True), 
# Without leading dot\n (\"XLSX\", True), # Uppercase\n (\".pdf\", False),\n (\".docx\", False),\n (\".txt\", False),\n (\"\", False),\n (None, False),\n ])\n def test_file_type_detection(self, file_type, expected):\n \"\"\"Test detection of various file types\"\"\"\n assert is_structured_file_type(file_type) == expected\n\n def test_excel_extensions_defined(self):\n \"\"\"Test that Excel extensions are properly defined\"\"\"\n assert \".xlsx\" in EXCEL_EXTENSIONS\n assert \".xls\" in EXCEL_EXTENSIONS\n assert len(EXCEL_EXTENSIONS) >= 4\n\n def test_csv_extensions_defined(self):\n \"\"\"Test that CSV extensions are properly defined\"\"\"\n assert \".csv\" in CSV_EXTENSIONS\n assert \".tsv\" in CSV_EXTENSIONS\n\n def test_structured_extensions_combined(self):\n \"\"\"Test that structured extensions include both Excel and CSV\"\"\"\n assert EXCEL_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n assert CSV_EXTENSIONS.issubset(STRUCTURED_EXTENSIONS)\n\n\nclass TestIsTabularPDF:\n \"\"\"Test tabular PDF detection\"\"\"\n\n def test_table_parser_detected(self):\n \"\"\"Test that table parser is detected as tabular\"\"\"\n assert is_tabular_pdf(\"table\", {}) is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n\n def test_html4excel_detected(self):\n \"\"\"Test that html4excel config is detected as tabular\"\"\"\n assert is_tabular_pdf(\"naive\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"\", {\"html4excel\": True}) is True\n\n def test_non_tabular_pdf(self):\n \"\"\"Test that non-tabular PDFs are not detected\"\"\"\n assert is_tabular_pdf(\"naive\", {}) is False\n assert is_tabular_pdf(\"naive\", {\"html4excel\": False}) is False\n assert is_tabular_pdf(\"\", {}) is False\n\n def test_combined_conditions(self):\n \"\"\"Test combined table parser and html4excel\"\"\"\n assert is_tabular_pdf(\"table\", {\"html4excel\": True}) is True\n assert is_tabular_pdf(\"table\", {\"html4excel\": False}) is True\n\n\nclass TestShouldSkipRaptor:\n \"\"\"Test Raptor skip 
logic\"\"\"\n\n def test_skip_excel_files(self):\n \"\"\"Test that Excel files skip Raptor\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\".xls\") is True\n assert should_skip_raptor(\".xlsm\") is True\n\n def test_skip_csv_files(self):\n \"\"\"Test that CSV files skip Raptor\"\"\"\n assert should_skip_raptor(\".csv\") is True\n assert should_skip_raptor(\".tsv\") is True\n\n def test_skip_tabular_pdf_with_table_parser(self):\n \"\"\"Test that tabular PDFs skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"table\") is True\n assert should_skip_raptor(\"pdf\", parser_id=\"TABLE\") is True\n\n def test_skip_tabular_pdf_with_html4excel(self):\n \"\"\"Test that PDFs with html4excel skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_config={\"html4excel\": True}) is True\n\n def test_dont_skip_regular_pdf(self):\n \"\"\"Test that regular PDFs don't skip Raptor\"\"\"\n assert should_skip_raptor(\".pdf\", parser_id=\"naive\") is False\n assert should_skip_raptor(\".pdf\", parser_config={}) is False\n\n def test_dont_skip_text_files(self):\n \"\"\"Test that text files don't skip Raptor\"\"\"\n assert should_skip_raptor(\".txt\") is False\n assert should_skip_raptor(\".docx\") is False\n assert should_skip_raptor(\".md\") is False\n\n def test_override_with_config(self):\n \"\"\"Test that auto-disable can be overridden\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should not skip even for Excel files\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".csv\", raptor_config=raptor_config) is False\n assert should_skip_raptor(\".pdf\", parser_id=\"table\", raptor_config=raptor_config) is False\n\n def test_default_auto_disable_enabled(self):\n \"\"\"Test that auto-disable is enabled by default\"\"\"\n # Empty raptor_config should default to auto_disable=True\n assert should_skip_raptor(\".xlsx\", raptor_config={}) is True\n 
assert should_skip_raptor(\".xlsx\", raptor_config=None) is True\n\n def test_explicit_auto_disable_enabled(self):\n \"\"\"Test explicit auto-disable enabled\"\"\"\n raptor_config = {\"auto_disable_for_structured_data\": True}\n assert should_skip_raptor(\".xlsx\", raptor_config=raptor_config) is True\n\n\nclass TestGetSkipReason:\n \"\"\"Test skip reason generation\"\"\"\n\n def test_excel_skip_reason(self):\n \"\"\"Test skip reason for Excel files\"\"\"\n reason = get_skip_reason(\".xlsx\")\n assert \"Structured data file\" in reason\n assert \".xlsx\" in reason\n assert \"auto-disabled\" in reason.lower()\n\n def test_csv_skip_reason(self):\n \"\"\"Test skip reason for CSV files\"\"\"\n reason = get_skip_reason(\".csv\")\n assert \"Structured data file\" in reason\n assert \".csv\" in reason\n\n def test_tabular_pdf_skip_reason(self):\n \"\"\"Test skip reason for tabular PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_id=\"table\")\n assert \"Tabular PDF\" in reason\n assert \"table\" in reason.lower()\n assert \"auto-disabled\" in reason.lower()\n\n def test_html4excel_skip_reason(self):\n \"\"\"Test skip reason for html4excel PDFs\"\"\"\n reason = get_skip_reason(\".pdf\", parser_config={\"html4excel\": True})\n assert \"Tabular PDF\" in reason\n\n def test_no_skip_reason_for_regular_files(self):\n \"\"\"Test that regular files have no skip reason\"\"\"\n assert get_skip_reason(\".txt\") == \"\"\n assert get_skip_reason(\".docx\") == \"\"\n assert get_skip_reason(\".pdf\", parser_id=\"naive\") == \"\"\n\n\nclass TestEdgeCases:\n \"\"\"Test edge cases and error handling\"\"\"\n\n def test_none_values(self):\n \"\"\"Test handling of None values\"\"\"\n assert should_skip_raptor(None) is False\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(None) == \"\"\n\n def test_empty_strings(self):\n \"\"\"Test handling of empty strings\"\"\"\n assert should_skip_raptor(\"\") is False\n assert get_skip_reason(\"\") == \"\"\n\n def 
test_case_insensitivity(self):\n \"\"\"Test case insensitive handling\"\"\"\n assert is_structured_file_type(\"XLSX\") is True\n assert is_structured_file_type(\"XlSx\") is True\n assert is_tabular_pdf(\"TABLE\", {}) is True\n assert is_tabular_pdf(\"TaBlE\", {}) is True\n\n def test_with_and_without_dot(self):\n \"\"\"Test file extensions with and without leading dot\"\"\"\n assert should_skip_raptor(\".xlsx\") is True\n assert should_skip_raptor(\"xlsx\") is True\n assert should_skip_raptor(\".CSV\") is True\n assert should_skip_raptor(\"csv\") is True\n\n\nclass TestIntegrationScenarios:\n \"\"\"Test real-world integration scenarios\"\"\"\n\n def test_financial_excel_report(self):\n \"\"\"Test scenario: Financial quarterly Excel report\"\"\"\n file_type = \".xlsx\"\n parser_id = \"naive\"\n parser_config = {}\n raptor_config = {\"use_raptor\": True}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Structured data file\" in reason\n\n def test_scientific_csv_data(self):\n \"\"\"Test scenario: Scientific experimental CSV results\"\"\"\n file_type = \".csv\"\n \n # Should skip Raptor\n assert should_skip_raptor(file_type) is True\n reason = get_skip_reason(file_type)\n assert \".csv\" in reason\n\n def test_legal_contract_with_tables(self):\n \"\"\"Test scenario: Legal contract PDF with tables\"\"\"\n file_type = \".pdf\"\n parser_id = \"table\"\n parser_config = {}\n \n # Should skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is True\n reason = get_skip_reason(file_type, parser_id, parser_config)\n assert \"Tabular PDF\" in reason\n\n def test_text_heavy_pdf_document(self):\n \"\"\"Test scenario: Text-heavy PDF document\"\"\"\n file_type = \".pdf\"\n parser_id = \"naive\"\n parser_config = {}\n \n # Should NOT skip Raptor\n assert should_skip_raptor(file_type, parser_id, parser_config) is False\n 
reason = get_skip_reason(file_type, parser_id, parser_config)\n assert reason == \"\"\n\n def test_mixed_dataset_processing(self):\n \"\"\"Test scenario: Mixed dataset with various file types\"\"\"\n files = [\n (\".xlsx\", \"naive\", {}, True), # Excel - skip\n (\".csv\", \"naive\", {}, True), # CSV - skip\n (\".pdf\", \"table\", {}, True), # Tabular PDF - skip\n (\".pdf\", \"naive\", {}, False), # Regular PDF - don't skip\n (\".docx\", \"naive\", {}, False), # Word doc - don't skip\n (\".txt\", \"naive\", {}, False), # Text file - don't skip\n ]\n \n for file_type, parser_id, parser_config, expected_skip in files:\n result = should_skip_raptor(file_type, parser_id, parser_config)\n assert result == expected_skip, f\"Failed for {file_type}\"\n\n def test_override_for_special_excel(self):\n \"\"\"Test scenario: Override auto-disable for special Excel processing\"\"\"\n file_type = \".xlsx\"\n raptor_config = {\"auto_disable_for_structured_data\": False}\n \n # Should NOT skip when explicitly disabled\n assert should_skip_raptor(file_type, raptor_config=raptor_config) is False\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-v\"])\n", "framework": "pytest", "test_command": "pytest test/unit_test/utils/test_raptor_utils.py::TestEdgeCases::test_case_insensitivity -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 7, "file_lines": 145, "has_docstring": true, "num_tests": 2} | {"status": "passed", "tests_run": 2} | repo_patch/0044 | file_overlap |
repo_patch/0030 | infiniflow/ragflow | common/float_utils.py | get_float | get_float | function | null | #
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def get_float(v):
"""
Convert a value to float, handling None and exceptions gracefully.
Attempts to convert the input value to a float. If the value is None or
cannot be converted to float, returns negative infinity as a default value.
Args:
v: The value to convert to float. Can be any type that float() accepts,
or None.
Returns:
float: The converted float value if successful, otherwise float('-inf').
Examples:
>>> get_float("3.14")
3.14
>>> get_float(None)
-inf
>>> get_float("invalid")
-inf
>>> get_float(42)
42.0
"""
# TODO: Implement this function
def normalize_overlapped_percent(overlapped_percent):
try:
value = float(overlapped_percent)
except (TypeError, ValueError):
return 0
if 0 < value < 1:
value *= 100
value = int(value)
return max(0, min(value, 90)) | def get_float(v):
"""
Convert a value to float, handling None and exceptions gracefully.
Attempts to convert the input value to a float. If the value is None or
cannot be converted to float, returns negative infinity as a default value.
Args:
v: The value to convert to float. Can be any type that float() accepts,
or None.
Returns:
float: The converted float value if successful, otherwise float('-inf').
Examples:
>>> get_float("3.14")
3.14
>>> get_float(None)
-inf
>>> get_float("invalid")
-inf
>>> get_float(42)
42.0
""" | Convert a value to float, handling None and exceptions gracefully.
Attempts to convert the input value to a float. If the value is None or
cannot be converted to float, returns negative infinity as a default value.
Args:
v: The value to convert to float. Can be any type that float() accepts,
or None.
Returns:
float: The converted float value if successful, otherwise float('-inf').
Examples:
>>> get_float("3.14")
3.14
>>> get_float(None)
-inf
>>> get_float("invalid")
-inf
>>> get_float(42)
42.0 | if v is None:
return float("-inf")
try:
return float(v)
except Exception:
return float("-inf") | def get_float(v):
"""
Convert a value to float, handling None and exceptions gracefully.
Attempts to convert the input value to a float. If the value is None or
cannot be converted to float, returns negative infinity as a default value.
Args:
v: The value to convert to float. Can be any type that float() accepts,
or None.
Returns:
float: The converted float value if successful, otherwise float('-inf').
Examples:
>>> get_float("3.14")
3.14
>>> get_float(None)
-inf
>>> get_float("invalid")
-inf
>>> get_float(42)
42.0
"""
if v is None:
return float("-inf")
try:
return float(v)
except Exception:
return float("-inf") | [{"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_valid_float_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert 
math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_valid_float_string -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_valid_integer_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_valid_integer_string -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_valid_numbers", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_valid_numbers -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_none_input", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_none_input -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_invalid_strings", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_invalid_strings -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_boolean_input", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_boolean_input -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_special_float_strings", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_special_float_strings -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_very_large_numbers", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_very_large_numbers -xvs"}, {"test_file": "test/unit_test/common/test_float_utils.py", "test_function": "TestGetFloat.test_whitespace_strings", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport math\nfrom common.float_utils import get_float\n\nclass TestGetFloat:\n\n def test_valid_float_string(self):\n \"\"\"Test conversion of valid float strings\"\"\"\n assert get_float(\"3.14\") == 3.14\n assert get_float(\"-2.5\") == -2.5\n assert get_float(\"0.0\") == 0.0\n assert get_float(\"123.456\") == 123.456\n\n def test_valid_integer_string(self):\n \"\"\"Test conversion of valid integer strings\"\"\"\n assert get_float(\"42\") == 42.0\n assert get_float(\"-100\") == -100.0\n assert get_float(\"0\") == 0.0\n\n def test_valid_numbers(self):\n \"\"\"Test conversion of actual number types\"\"\"\n assert get_float(3.14) == 3.14\n assert get_float(-2.5) == -2.5\n assert get_float(42) == 42.0\n assert get_float(0) == 0.0\n\n def test_none_input(self):\n \"\"\"Test handling of None input\"\"\"\n result = get_float(None)\n assert math.isinf(result)\n assert result < 0 # Should be negative infinity\n\n def test_invalid_strings(self):\n \"\"\"Test handling of invalid string inputs\"\"\"\n result = get_float(\"invalid\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"12.34.56\")\n assert math.isinf(result)\n assert result < 0\n\n result = get_float(\"\")\n assert math.isinf(result)\n assert result < 0\n\n def test_boolean_input(self):\n \"\"\"Test conversion of boolean values\"\"\"\n assert get_float(True) == 1.0\n assert get_float(False) == 0.0\n\n def 
test_special_float_strings(self):\n \"\"\"Test handling of special float strings\"\"\"\n assert get_float(\"inf\") == float('inf')\n assert get_float(\"-inf\") == float('-inf')\n\n # NaN should return -inf according to our function's design\n result = get_float(\"nan\")\n assert math.isnan(result)\n\n def test_very_large_numbers(self):\n \"\"\"Test very large number strings\"\"\"\n assert get_float(\"1e308\") == 1e308\n # This will become inf in Python, but let's test it\n large_result = get_float(\"1e500\")\n assert math.isinf(large_result)\n\n def test_whitespace_strings(self):\n \"\"\"Test strings with whitespace\"\"\"\n assert get_float(\" 3.14 \") == 3.14\n result = get_float(\" invalid \")\n assert math.isinf(result)\n assert result < 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_float_utils.py::TestGetFloat::test_whitespace_strings -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 6, "file_lines": 59, "has_docstring": true, "num_tests": 9} | {"status": "passed", "tests_run": 9} | repo_patch/0045 | clean |
repo_patch/0031 | infiniflow/ragflow | common/time_utils.py | date_string_to_timestamp | date_string_to_timestamp | function | null | #
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import time
def current_timestamp():
"""
Get the current timestamp in milliseconds.
Returns:
int: Current Unix timestamp in milliseconds (13 digits)
Example:
>>> current_timestamp()
1704067200000
"""
return int(time.time() * 1000)
def timestamp_to_date(timestamp, format_string="%Y-%m-%d %H:%M:%S"):
"""
Convert a timestamp to formatted date string.
Args:
timestamp: Unix timestamp in milliseconds. If None or empty, uses current time.
format_string: Format string for the output date (default: "%Y-%m-%d %H:%M:%S")
Returns:
str: Formatted date string
Example:
>>> timestamp_to_date(1704067200000)
'2024-01-01 08:00:00'
"""
if not timestamp:
timestamp = time.time()
timestamp = int(timestamp) / 1000
time_array = time.localtime(timestamp)
str_date = time.strftime(format_string, time_array)
return str_date
def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
"""
Convert a date string to timestamp in milliseconds.
Args:
time_str: Date string to convert
format_string: Format of the input date string (default: "%Y-%m-%d %H:%M:%S")
Returns:
int: Unix timestamp in milliseconds
Example:
>>> date_string_to_timestamp("2024-01-01 00:00:00")
1704067200000
"""
# TODO: Implement this function
def datetime_format(date_time: datetime.datetime) -> datetime.datetime:
"""
Normalize a datetime object by removing microsecond component.
Creates a new datetime object with only year, month, day, hour, minute, second.
Microseconds are set to 0.
Args:
date_time: datetime object to normalize
Returns:
datetime.datetime: New datetime object without microseconds
Example:
>>> dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)
>>> datetime_format(dt)
datetime.datetime(2024, 1, 1, 12, 30, 45)
"""
return datetime.datetime(date_time.year, date_time.month, date_time.day,
date_time.hour, date_time.minute, date_time.second)
def get_format_time() -> datetime.datetime:
"""
Get current datetime normalized without microseconds.
Returns:
datetime.datetime: Current datetime with microseconds set to 0
Example:
>>> get_format_time()
datetime.datetime(2024, 1, 1, 12, 30, 45)
"""
return datetime_format(datetime.datetime.now())
def delta_seconds(date_string: str):
"""
Calculate seconds elapsed from a given date string to now.
Args:
date_string: Date string in "YYYY-MM-DD HH:MM:SS" format
Returns:
float: Number of seconds between the given date and current time
Example:
>>> delta_seconds("2024-01-01 12:00:00")
3600.0 # If current time is 2024-01-01 13:00:00
"""
dt = datetime.datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S")
return (datetime.datetime.now() - dt).total_seconds()
def format_iso_8601_to_ymd_hms(time_str: str) -> str:
"""
Convert ISO 8601 formatted string to "YYYY-MM-DD HH:MM:SS" format.
Args:
time_str: ISO 8601 date string (e.g. "2024-01-01T12:00:00Z")
Returns:
str: Date string in "YYYY-MM-DD HH:MM:SS" format
Example:
>>> format_iso_8601_to_ymd_hms("2024-01-01T12:00:00Z")
'2024-01-01 12:00:00'
"""
from dateutil import parser
try:
if parser.isoparse(time_str):
dt = datetime.datetime.fromisoformat(time_str.replace("Z", "+00:00"))
return dt.strftime("%Y-%m-%d %H:%M:%S")
else:
return time_str
except Exception as e:
logging.error(str(e))
return time_str | def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
"""
Convert a date string to timestamp in milliseconds.
Args:
time_str: Date string to convert
format_string: Format of the input date string (default: "%Y-%m-%d %H:%M:%S")
Returns:
int: Unix timestamp in milliseconds
Example:
>>> date_string_to_timestamp("2024-01-01 00:00:00")
1704067200000
""" | Convert a date string to timestamp in milliseconds.
Args:
time_str: Date string to convert
format_string: Format of the input date string (default: "%Y-%m-%d %H:%M:%S")
Returns:
int: Unix timestamp in milliseconds
Example:
>>> date_string_to_timestamp("2024-01-01 00:00:00")
1704067200000 | time_array = time.strptime(time_str, format_string)
time_stamp = int(time.mktime(time_array) * 1000)
return time_stamp | def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
"""
Convert a date string to timestamp in milliseconds.
Args:
time_str: Date string to convert
format_string: Format of the input date string (default: "%Y-%m-%d %H:%M:%S")
Returns:
int: Unix timestamp in milliseconds
Example:
>>> date_string_to_timestamp("2024-01-01 00:00:00")
1704067200000
"""
time_array = time.strptime(time_str, format_string)
time_stamp = int(time.mktime(time_array) * 1000)
return time_stamp | [{"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_basic_date_string_conversion", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert 
abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n 
\"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = 
timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than 
seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n 
with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already 
has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = 
datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n 
assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert 
result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = 
datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with 
pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": 
"pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_basic_date_string_conversion -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_custom_format_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 
1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = 
timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless 
of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = 
date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 
\",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert 
result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n 
\"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = 
datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = 
datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 
seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n 
delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n 
date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_custom_format_string -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_return_type_integer", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def 
test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n 
\"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert 
result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n 
\"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def 
test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components 
remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n 
assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def 
test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same 
input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = 
past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def 
test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # 
Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_return_type_integer -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_timestamp_in_milliseconds", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = 
current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n 
assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = 
timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = 
date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n 
expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = 
datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = 
datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == 
original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second 
== second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in 
minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n 
\"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n 
date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_timestamp_in_milliseconds -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_different_dates", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get 
timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 
00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases 
= [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always 
returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, 
\"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n 
original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def 
test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero 
microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert 
result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert 
abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with 
pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific 
time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_different_dates -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_epoch_date", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that 
returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test 
conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def 
test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 
1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 
00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def 
test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 
0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = 
datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components 
remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = 
delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime 
string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n 
now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_epoch_date -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_leap_year_date", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) 
== 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == 
time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", 
time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n 
expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def 
test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass 
TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = 
datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n 
\"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n 
assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n 
date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid 
minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def 
test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_leap_year_date -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_date_only_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits 
(milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = 
timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base 
timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result 
= date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 
1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n 
date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = 
datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert 
original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, 
microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n 
now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid 
day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 
12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_date_only_string -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_with_whitespace", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are 
monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 
1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = 
timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def 
test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n 
\"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert 
result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert 
result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime 
(should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return 
type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across 
multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number 
(many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_with_whitespace -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": 
"TestDateStringToTimestamp.test_invalid_date_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return 
increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = 
timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n 
\"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n 
expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def 
test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 
45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day 
== 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == 
original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == 
result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = 
delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very 
old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest 
test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_invalid_date_string -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_mismatched_format_string", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # 
Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n 
assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, 
-1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 
13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in 
test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def 
test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = 
datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert 
result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = 
datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def 
test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n 
\"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result 
= delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_mismatched_format_string -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_empty_string_input", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 
1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n \"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some 
time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type 
is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n \"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 
00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n 
\" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert 
result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def 
test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n 
result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = 
datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 
seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n 
delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n 
date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_empty_string_input -xvs"}, {"test_file": "test/unit_test/common/test_time_utils.py", "test_function": "TestDateStringToTimestamp.test_none_input", "test_content": "#\n# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport time\nimport datetime\nimport pytest\nfrom common.time_utils import current_timestamp, timestamp_to_date, date_string_to_timestamp, datetime_format, delta_seconds\n\n\nclass TestCurrentTimestamp:\n \"\"\"Test cases for current_timestamp function\"\"\"\n\n def test_returns_integer(self):\n \"\"\"Test that function returns an integer\"\"\"\n result = current_timestamp()\n assert isinstance(result, int)\n\n def test_returns_13_digits(self):\n \"\"\"Test that returned timestamp has 13 digits (milliseconds)\"\"\"\n result = current_timestamp()\n assert len(str(result)) == 13\n\n def test_approximately_correct_value(self):\n \"\"\"Test that returned value is approximately correct compared to current time\"\"\"\n # Get timestamps before and after function call for comparison\n before = int(time.time() * 1000)\n result = current_timestamp()\n after = int(time.time() * 1000)\n\n assert before <= result <= after\n\n def 
test_consistent_with_time_module(self):\n \"\"\"Test that result matches time.time() * 1000 calculation\"\"\"\n expected = int(time.time() * 1000)\n result = current_timestamp()\n\n # Allow small difference due to execution time (typically 1-2ms)\n assert abs(result - expected) <= 10\n\n def test_multiple_calls_increase(self):\n \"\"\"Test that multiple calls return increasing timestamps\"\"\"\n results = [current_timestamp() for _ in range(5)]\n\n # Check if timestamps are monotonically increasing\n # (allow equal values as they might be in the same millisecond)\n for i in range(1, len(results)):\n assert results[i] >= results[i - 1]\n\n\nclass TestTimestampToDate:\n \"\"\"Test cases for timestamp_to_date function\"\"\"\n\n def test_basic_timestamp_conversion(self):\n \"\"\"Test basic timestamp to date conversion with default format\"\"\"\n # Test with a specific timestamp\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format string\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n local = time.localtime(timestamp / 1000)\n\n # Test different format strings\n result1 = timestamp_to_date(timestamp, \"%Y-%m-%d\")\n assert result1 == time.strftime(\"%Y-%m-%d\", local)\n\n result2 = timestamp_to_date(timestamp, \"%H:%M:%S\")\n assert result2 == time.strftime(\"%H:%M:%S\", local)\n\n result3 = timestamp_to_date(timestamp, \"%Y/%m/%d %H:%M\")\n assert result3 == time.strftime(\"%Y/%m/%d %H:%M\", local)\n\n def test_zero_timestamp(self):\n \"\"\"Test conversion with zero timestamp (epoch)\"\"\"\n timestamp = 0 # 1970-01-01 00:00:00 UTC\n result = timestamp_to_date(timestamp)\n # Note: Actual result depends on local timezone\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_negative_timestamp(self):\n 
\"\"\"Test conversion with negative timestamp (pre-epoch)\"\"\"\n timestamp = -1000000 # Some time before 1970\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n assert len(result) > 0\n\n def test_string_timestamp_input(self):\n \"\"\"Test that string timestamp input is handled correctly\"\"\"\n timestamp_str = \"1704067200000\"\n result = timestamp_to_date(timestamp_str)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_str) / 1000))\n assert result == expected\n\n def test_float_timestamp_input(self):\n \"\"\"Test that float timestamp input is handled correctly\"\"\"\n timestamp_float = 1704067200000.0\n result = timestamp_to_date(timestamp_float)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp_float) / 1000))\n assert result == expected\n\n def test_different_timezones_handled(self):\n \"\"\"Test that function handles timezone conversion properly\"\"\"\n timestamp = 1704067200000 # 2024-01-01 00:00:00 UTC\n\n # The actual result will depend on the system's local timezone\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n # Should contain date components\n assert \"2024\" in result or \"08:00:00\" in result\n\n def test_millisecond_precision(self):\n \"\"\"Test that milliseconds are properly handled (truncated)\"\"\"\n # Test timestamp with milliseconds component\n timestamp = 1704067200123 # 2024-01-01 00:00:00.123 UTC\n result = timestamp_to_date(timestamp)\n\n # Milliseconds are truncated, so result should match the base timestamp\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(int(timestamp) / 1000))\n assert result == expected\n\n def test_various_timestamps(self):\n \"\"\"Test conversion with various timestamp values\"\"\"\n test_cases = [1609459200000, 4102444800000]\n\n for timestamp in test_cases:\n result = timestamp_to_date(timestamp)\n expected = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(timestamp / 1000))\n assert 
result == expected\n\n def test_return_type_always_string(self):\n \"\"\"Test that return type is always string regardless of input\"\"\"\n test_inputs = [1704067200000, None, \"\", 0, -1000, \"1704067200000\"]\n\n for timestamp in test_inputs:\n result = timestamp_to_date(timestamp)\n assert isinstance(result, str)\n\n def test_edge_case_format_strings(self):\n \"\"\"Test edge cases with unusual format strings\"\"\"\n timestamp = 1704067200000\n\n # Empty format string\n result = timestamp_to_date(timestamp, \"\")\n assert result == \"\"\n\n # Single character format\n result = timestamp_to_date(timestamp, \"Y\")\n assert isinstance(result, str)\n\n # Format with only separators\n result = timestamp_to_date(timestamp, \"---\")\n assert result == \"---\"\n\n\nclass TestDateStringToTimestamp:\n \"\"\"Test cases for date_string_to_timestamp function\"\"\"\n\n def test_basic_date_string_conversion(self):\n \"\"\"Test basic date string to timestamp conversion with default format\"\"\"\n date_string = \"2024-01-01 08:00:00\"\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_custom_format_string(self):\n \"\"\"Test conversion with custom format strings\"\"\"\n # Test different date formats\n test_cases = [\n (\"2024-01-01\", \"%Y-%m-%d\"),\n (\"2024/01/01 12:30:45\", \"%Y/%m/%d %H:%M:%S\"),\n (\"01-01-2024\", \"%m-%d-%Y\"),\n (\"20240101\", \"%Y%m%d\"),\n ]\n\n for date_string, format_string in test_cases:\n result = date_string_to_timestamp(date_string, format_string)\n expected = int(time.mktime(time.strptime(date_string, format_string)) * 1000)\n assert result == expected\n\n def test_return_type_integer(self):\n \"\"\"Test that function always returns integer\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n\n def test_timestamp_in_milliseconds(self):\n 
\"\"\"Test that returned timestamp is in milliseconds (13 digits)\"\"\"\n date_string = \"2024-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert len(str(result)) == 13\n\n # Verify it's milliseconds by checking it's 1000x larger than seconds timestamp\n seconds_timestamp = time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\"))\n expected_milliseconds = int(seconds_timestamp * 1000)\n assert result == expected_milliseconds\n\n def test_different_dates(self):\n \"\"\"Test conversion with various date strings\"\"\"\n test_cases = [\n \"2024-01-01 00:00:00\",\n \"2020-12-31 16:00:00\",\n \"2023-06-15 14:30:00\",\n \"2025-12-25 23:59:59\",\n ]\n\n for date_string in test_cases:\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_epoch_date(self):\n \"\"\"Test conversion with epoch date (1970-01-01)\"\"\"\n # Note: The actual value depends on the local timezone\n date_string = \"1970-01-01 00:00:00\"\n result = date_string_to_timestamp(date_string)\n assert isinstance(result, int)\n # Should be a small positive or negative number depending on timezone\n assert abs(result) < 86400000 # Within 24 hours in milliseconds\n\n def test_leap_year_date(self):\n \"\"\"Test conversion with leap year date\"\"\"\n date_string = \"2024-02-29 12:00:00\" # Valid leap year date\n result = date_string_to_timestamp(date_string)\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")) * 1000)\n assert result == expected\n\n def test_date_only_string(self):\n \"\"\"Test conversion with date-only format (assumes 00:00:00 time)\"\"\"\n date_string = \"2024-01-01\"\n result = date_string_to_timestamp(date_string, \"%Y-%m-%d\")\n # Should be equivalent to \"2024-01-01 00:00:00\" in local timezone\n expected = int(time.mktime(time.strptime(date_string, \"%Y-%m-%d\")) * 1000)\n assert result == expected\n\n def 
test_with_whitespace(self):\n \"\"\"Test that function handles whitespace properly\"\"\"\n test_cases = [\n \" 2024-01-01 00:00:00 \",\n \"\\t2024-01-01 00:00:00\\n\",\n ]\n\n for date_string in test_cases:\n # These should raise ValueError due to extra whitespace\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string)\n\n def test_invalid_date_string(self):\n \"\"\"Test that invalid date string raises ValueError\"\"\"\n invalid_cases = [\n \"invalid-date\",\n \"2024-13-01 00:00:00\", # Invalid month\n \"2024-01-32 00:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 00:60:00\", # Invalid minute\n \"2024-02-30 00:00:00\", # Invalid date (Feb 30)\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(invalid_date)\n\n def test_mismatched_format_string(self):\n \"\"\"Test that mismatched format string raises ValueError\"\"\"\n test_cases = [\n (\"2024-01-01 00:00:00\", \"%Y-%m-%d\"), # Missing time in format\n (\"2024-01-01\", \"%Y-%m-%d %H:%M:%S\"), # Missing time in date string\n (\"01/01/2024\", \"%Y-%m-%d\"), # Wrong separator\n ]\n\n for date_string, format_string in test_cases:\n with pytest.raises(ValueError):\n date_string_to_timestamp(date_string, format_string)\n\n def test_empty_string_input(self):\n \"\"\"Test that empty string input raises ValueError\"\"\"\n with pytest.raises(ValueError):\n date_string_to_timestamp(\"\")\n\n def test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n date_string_to_timestamp(None)\n\n\nclass TestDatetimeFormat:\n \"\"\"Test cases for datetime_format function\"\"\"\n\n def test_remove_microseconds(self):\n \"\"\"Test that microseconds are removed from datetime object\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify microseconds are 0\n assert result.microsecond == 0\n # Verify other components 
remain the same\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_zero_microseconds(self):\n \"\"\"Test datetime that already has zero microseconds\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 0)\n result = datetime_format(original_dt)\n\n # Should remain the same\n assert result == original_dt\n assert result.microsecond == 0\n\n def test_datetime_with_max_microseconds(self):\n \"\"\"Test datetime with maximum microseconds value\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 999999)\n result = datetime_format(original_dt)\n\n # Microseconds should be removed\n assert result.microsecond == 0\n # Other components should remain\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 12\n assert result.minute == 30\n assert result.second == 45\n\n def test_datetime_with_only_date_components(self):\n \"\"\"Test datetime with only date components (time defaults to 00:00:00)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1)\n result = datetime_format(original_dt)\n\n # Should have zero time components and zero microseconds\n assert result.year == 2024\n assert result.month == 1\n assert result.day == 1\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_midnight(self):\n \"\"\"Test datetime at midnight\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 0, 0, 0, 123456)\n result = datetime_format(original_dt)\n\n assert result.hour == 0\n assert result.minute == 0\n assert result.second == 0\n assert result.microsecond == 0\n\n def test_datetime_with_end_of_day(self):\n \"\"\"Test datetime at end of day (23:59:59)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 23, 59, 59, 999999)\n result = datetime_format(original_dt)\n\n assert result.hour == 23\n 
assert result.minute == 59\n assert result.second == 59\n assert result.microsecond == 0\n\n def test_leap_year_datetime(self):\n \"\"\"Test datetime on leap day\"\"\"\n original_dt = datetime.datetime(2024, 2, 29, 14, 30, 15, 500000)\n result = datetime_format(original_dt)\n\n assert result.year == 2024\n assert result.month == 2\n assert result.day == 29\n assert result.hour == 14\n assert result.minute == 30\n assert result.second == 15\n assert result.microsecond == 0\n\n def test_returns_new_object(self):\n \"\"\"Test that function returns a new datetime object, not the original\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Verify it's a different object\n assert result is not original_dt\n # Verify original is unchanged\n assert original_dt.microsecond == 123456\n\n def test_datetime_with_only_seconds(self):\n \"\"\"Test datetime with only seconds specified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45)\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result == original_dt.replace(microsecond=0)\n\n def test_immutability_of_original(self):\n \"\"\"Test that original datetime object is not modified\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n original_microsecond = original_dt.microsecond\n\n # Original should remain unchanged\n assert original_dt.microsecond == original_microsecond\n assert original_dt.microsecond == 123456\n\n def test_minimum_datetime_value(self):\n \"\"\"Test with minimum datetime value\"\"\"\n original_dt = datetime.datetime.min\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def 
test_maximum_datetime_value(self):\n \"\"\"Test with maximum datetime value\"\"\"\n original_dt = datetime.datetime.max\n result = datetime_format(original_dt)\n\n # Should have zero microseconds\n assert result.microsecond == 0\n # Other components should match\n assert result.year == original_dt.year\n assert result.month == original_dt.month\n assert result.day == original_dt.day\n\n def test_timezone_naive_datetime(self):\n \"\"\"Test with timezone-naive datetime (should remain naive)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n # Should remain timezone-naive\n assert result.tzinfo is None\n\n def test_equality_with_replaced_datetime(self):\n \"\"\"Test that result equals datetime.replace(microsecond=0)\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n expected = original_dt.replace(microsecond=0)\n\n assert result == expected\n\n @pytest.mark.parametrize(\"year,month,day,hour,minute,second,microsecond\", [\n (2024, 1, 1, 0, 0, 0, 0), # Start of day\n (2024, 12, 31, 23, 59, 59, 999999), # End of year\n (2000, 6, 15, 12, 30, 45, 500000), # Random date\n (1970, 1, 1, 0, 0, 0, 123456), # Epoch equivalent\n (2030, 3, 20, 6, 15, 30, 750000), # Future date\n ])\n def test_parametrized_datetimes(self, year, month, day, hour, minute, second, microsecond):\n \"\"\"Test multiple datetime scenarios using parametrization\"\"\"\n original_dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)\n result = datetime_format(original_dt)\n\n # Verify microseconds are removed\n assert result.microsecond == 0\n\n # Verify other components remain the same\n assert result.year == year\n assert result.month == month\n assert result.day == day\n assert result.hour == hour\n assert result.minute == minute\n assert result.second == second\n\n def test_consistency_across_multiple_calls(self):\n \"\"\"Test that multiple calls with same 
input produce same output\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n\n result1 = datetime_format(original_dt)\n result2 = datetime_format(original_dt)\n result3 = datetime_format(original_dt)\n\n # All results should be equal\n assert result1 == result2 == result3\n # All should have zero microseconds\n assert result1.microsecond == result2.microsecond == result3.microsecond == 0\n\n def test_type_return(self):\n \"\"\"Test that return type is datetime.datetime\"\"\"\n original_dt = datetime.datetime(2024, 1, 1, 12, 30, 45, 123456)\n result = datetime_format(original_dt)\n\n assert isinstance(result, datetime.datetime)\n\n\nclass TestDeltaSeconds:\n \"\"\"Test cases for delta_seconds function\"\"\"\n\n def test_zero_seconds_difference(self):\n \"\"\"Test when given time equals current time\"\"\"\n # Use a time very close to now to minimize test flakiness\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be very close to 0\n assert abs(result) < 1.0\n\n def test_positive_seconds_difference(self):\n \"\"\"Test positive time difference (past date)\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 3600 seconds (1 hour)\n assert abs(result - 3600.0) < 1.0\n\n def test_negative_seconds_difference(self):\n \"\"\"Test negative time difference (future date)\"\"\"\n now = datetime.datetime.now()\n future_time = now + datetime.timedelta(hours=1)\n date_string = future_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately -3600 seconds (1 hour)\n assert abs(result + 3600.0) < 1.0\n\n def test_minutes_difference(self):\n \"\"\"Test difference in minutes\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=5)\n date_string = 
past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 300 seconds (5 minutes)\n assert abs(result - 300.0) < 1.0\n\n def test_return_type_float(self):\n \"\"\"Test that function returns float\"\"\"\n now = datetime.datetime.now()\n date_string = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_days_difference(self):\n \"\"\"Test difference across multiple days\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(days=1)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n # Should be approximately 86400 seconds (24 hours)\n assert abs(result - 86400.0) < 1.0\n\n def test_complex_time_difference(self):\n \"\"\"Test complex time difference with all components\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(hours=2, minutes=30, seconds=15)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n expected = 2 * 3600 + 30 * 60 + 15 # 2 hours + 30 minutes + 15 seconds\n assert abs(result - expected) < 1.0\n\n def test_invalid_date_format(self):\n \"\"\"Test that invalid date format raises ValueError\"\"\"\n invalid_cases = [\n \"2024-01-01\", # Missing time\n \"2024-01-01 12:00\", # Missing seconds\n \"2024/01/01 12:00:00\", # Wrong date separator\n \"01-01-2024 12:00:00\", # Wrong date format\n \"2024-13-01 12:00:00\", # Invalid month\n \"2024-01-32 12:00:00\", # Invalid day\n \"2024-01-01 25:00:00\", # Invalid hour\n \"2024-01-01 12:60:00\", # Invalid minute\n \"2024-01-01 12:00:60\", # Invalid second\n \"invalid datetime string\", # Completely invalid\n ]\n\n for invalid_date in invalid_cases:\n with pytest.raises(ValueError):\n delta_seconds(invalid_date)\n\n def test_empty_string(self):\n \"\"\"Test that empty string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\"\")\n\n def 
test_none_input(self):\n \"\"\"Test that None input raises TypeError\"\"\"\n with pytest.raises(TypeError):\n delta_seconds(None)\n\n def test_whitespace_string(self):\n \"\"\"Test that whitespace-only string raises ValueError\"\"\"\n with pytest.raises(ValueError):\n delta_seconds(\" \")\n\n def test_very_old_date(self):\n \"\"\"Test with very old date\"\"\"\n date_string = \"2000-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large positive number (many years in seconds)\n assert result > 0\n assert isinstance(result, float)\n\n def test_very_future_date(self):\n \"\"\"Test with very future date\"\"\"\n date_string = \"2030-01-01 12:00:00\"\n result = delta_seconds(date_string)\n # Should be a large negative number\n assert result < 0\n assert isinstance(result, float)\n\n def test_consistency_across_calls(self):\n \"\"\"Test that same input produces consistent results\"\"\"\n now = datetime.datetime.now()\n past_time = now - datetime.timedelta(minutes=10)\n date_string = past_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n result1 = delta_seconds(date_string)\n result2 = delta_seconds(date_string)\n result3 = delta_seconds(date_string)\n\n # All results should be very close (within 0.1 seconds)\n assert abs(result1 - result2) < 0.1\n assert abs(result2 - result3) < 0.1\n\n def test_leap_year_date(self):\n \"\"\"Test with leap year date (basic functionality)\"\"\"\n # This test verifies the function can handle leap year dates\n # without checking specific time differences\n date_string = \"2024-02-29 12:00:00\"\n result = delta_seconds(date_string)\n assert isinstance(result, float)\n\n def test_month_boundary(self):\n \"\"\"Test crossing month boundary\"\"\"\n now = datetime.datetime.now()\n # Use first day of current month at a specific time\n first_day = datetime.datetime(now.year, now.month, 1, 12, 0, 0)\n if first_day < now:\n date_string = first_day.strftime(\"%Y-%m-%d %H:%M:%S\")\n result = delta_seconds(date_string)\n assert result > 0 # 
Should be positive if first_day is in past\n else:\n # If we're testing on the first day of month\n date_string = \"2024-01-31 12:00:00\" # Use a known past date\n result = delta_seconds(date_string)\n assert result > 0", "framework": "pytest", "test_command": "pytest test/unit_test/common/test_time_utils.py::TestDateStringToTimestamp::test_none_input -xvs"}] | {"repo_url": "https://github.com/infiniflow/ragflow", "install_cmd": "pip install -e .", "commit_sha": "1c87f97dde78adc1d583b8bcc2f43502602db28e", "frozen_requirements": "frozen_requirements/infiniflow_ragflow.txt"} | {"body_lines": 3, "file_lines": 155, "has_docstring": true, "num_tests": 13} | {"status": "passed", "tests_run": 13} | repo_patch/0046 | clean |
repo_patch/0032 | mem0ai/mem0 | mem0/llms/vllm.py | generate_response | VllmLLM.generate_response | method | VllmLLM | import json
import os
from typing import Dict, List, Optional, Union
from openai import OpenAI
from mem0.configs.llms.base import BaseLlmConfig
from mem0.configs.llms.vllm import VllmConfig
from mem0.llms.base import LLMBase
from mem0.memory.utils import extract_json
class VllmLLM(LLMBase):
def __init__(self, config: Optional[Union[BaseLlmConfig, VllmConfig, Dict]] = None):
# Convert to VllmConfig if needed
if config is None:
config = VllmConfig()
elif isinstance(config, dict):
config = VllmConfig(**config)
elif isinstance(config, BaseLlmConfig) and not isinstance(config, VllmConfig):
# Convert BaseLlmConfig to VllmConfig
config = VllmConfig(
model=config.model,
temperature=config.temperature,
api_key=config.api_key,
max_tokens=config.max_tokens,
top_p=config.top_p,
top_k=config.top_k,
enable_vision=config.enable_vision,
vision_details=config.vision_details,
http_client_proxies=config.http_client,
)
super().__init__(config)
if not self.config.model:
self.config.model = "Qwen/Qwen2.5-32B-Instruct"
self.config.api_key = self.config.api_key or os.getenv("VLLM_API_KEY") or "vllm-api-key"
base_url = self.config.vllm_base_url or os.getenv("VLLM_BASE_URL")
self.client = OpenAI(api_key=self.config.api_key, base_url=base_url)
def _parse_response(self, response, tools):
"""
Process the response based on whether tools are used or not.
Args:
response: The raw response from API.
tools: The list of tools provided in the request.
Returns:
str or dict: The processed response.
"""
if tools:
processed_response = {
"content": response.choices[0].message.content,
"tool_calls": [],
}
if response.choices[0].message.tool_calls:
for tool_call in response.choices[0].message.tool_calls:
processed_response["tool_calls"].append(
{
"name": tool_call.function.name,
"arguments": json.loads(extract_json(tool_call.function.arguments)),
}
)
return processed_response
else:
return response.choices[0].message.content
def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
"""
Generate a response based on the given messages using vLLM.
Args:
messages (list): List of message dicts containing 'role' and 'content'.
response_format (str or object, optional): Format of the response. Defaults to "text".
tools (list, optional): List of tools that the model can call. Defaults to None.
tool_choice (str, optional): Tool choice method. Defaults to "auto".
**kwargs: Additional vLLM-specific parameters.
Returns:
str: The generated response.
"""
# TODO: Implement this function | def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
"""
Generate a response based on the given messages using vLLM.
Args:
messages (list): List of message dicts containing 'role' and 'content'.
response_format (str or object, optional): Format of the response. Defaults to "text".
tools (list, optional): List of tools that the model can call. Defaults to None.
tool_choice (str, optional): Tool choice method. Defaults to "auto".
**kwargs: Additional vLLM-specific parameters.
Returns:
str: The generated response.
""" | Generate a response based on the given messages using vLLM.
Args:
messages (list): List of message dicts containing 'role' and 'content'.
response_format (str or object, optional): Format of the response. Defaults to "text".
tools (list, optional): List of tools that the model can call. Defaults to None.
tool_choice (str, optional): Tool choice method. Defaults to "auto".
**kwargs: Additional vLLM-specific parameters.
Returns:
str: The generated response. | params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
"model": self.config.model,
"messages": messages,
}
)
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | def generate_response(
self,
messages: List[Dict[str, str]],
response_format=None,
tools: Optional[List[Dict]] = None,
tool_choice: str = "auto",
**kwargs,
):
"""
Generate a response based on the given messages using vLLM.
Args:
messages (list): List of message dicts containing 'role' and 'content'.
response_format (str or object, optional): Format of the response. Defaults to "text".
tools (list, optional): List of tools that the model can call. Defaults to None.
tool_choice (str, optional): Tool choice method. Defaults to "auto".
**kwargs: Additional vLLM-specific parameters.
Returns:
str: The generated response.
"""
params = self._get_supported_params(messages=messages, **kwargs)
params.update(
{
"model": self.config.model,
"messages": messages,
}
)
if tools:
params["tools"] = tools
params["tool_choice"] = tool_choice
response = self.client.chat.completions.create(**params)
return self._parse_response(response, tools) | [{"test_file": "tests/llms/test_vllm.py", "test_function": "test_generate_response_without_tools", "test_content": "from unittest.mock import MagicMock, Mock, patch\n\nimport pytest\n\nfrom mem0 import AsyncMemory, Memory\nfrom mem0.configs.llms.base import BaseLlmConfig\nfrom mem0.llms.vllm import VllmLLM\n\n\n@pytest.fixture\ndef mock_vllm_client():\n with patch(\"mem0.llms.vllm.OpenAI\") as mock_openai:\n mock_client = Mock()\n mock_openai.return_value = mock_client\n yield mock_client\n\n\ndef test_generate_response_without_tools(mock_vllm_client):\n config = BaseLlmConfig(model=\"Qwen/Qwen2.5-32B-Instruct\", temperature=0.7, max_tokens=100, top_p=1.0)\n llm = VllmLLM(config)\n messages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello, how are you?\"},\n ]\n\n mock_response = Mock()\n mock_response.choices = [Mock(message=Mock(content=\"I'm doing well, thank you for asking!\"))]\n mock_vllm_client.chat.completions.create.return_value = mock_response\n\n response = llm.generate_response(messages)\n\n mock_vllm_client.chat.completions.create.assert_called_once_with(\n model=\"Qwen/Qwen2.5-32B-Instruct\", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0\n )\n assert response == \"I'm doing well, thank you for asking!\"\n\n\ndef test_generate_response_with_tools(mock_vllm_client):\n config = BaseLlmConfig(model=\"Qwen/Qwen2.5-32B-Instruct\", temperature=0.7, max_tokens=100, top_p=1.0)\n llm = VllmLLM(config)\n messages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Add a new memory: Today is a sunny day.\"},\n ]\n tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"add_memory\",\n \"description\": \"Add a memory\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\"data\": {\"type\": \"string\", \"description\": \"Data to add to 
memory\"}},\n \"required\": [\"data\"],\n },\n },\n }\n ]\n\n mock_response = Mock()\n mock_message = Mock()\n mock_message.content = \"I've added the memory for you.\"\n\n mock_tool_call = Mock()\n mock_tool_call.function.name = \"add_memory\"\n mock_tool_call.function.arguments = '{\"data\": \"Today is a sunny day.\"}'\n\n mock_message.tool_calls = [mock_tool_call]\n mock_response.choices = [Mock(message=mock_message)]\n mock_vllm_client.chat.completions.create.return_value = mock_response\n\n response = llm.generate_response(messages, tools=tools)\n\n mock_vllm_client.chat.completions.create.assert_called_once_with(\n model=\"Qwen/Qwen2.5-32B-Instruct\",\n messages=messages,\n temperature=0.7,\n max_tokens=100,\n top_p=1.0,\n tools=tools,\n tool_choice=\"auto\",\n )\n\n assert response[\"content\"] == \"I've added the memory for you.\"\n assert len(response[\"tool_calls\"]) == 1\n assert response[\"tool_calls\"][0][\"name\"] == \"add_memory\"\n assert response[\"tool_calls\"][0][\"arguments\"] == {\"data\": \"Today is a sunny day.\"}\n\n\n\ndef create_mocked_memory():\n \"\"\"Create a fully mocked Memory instance for testing.\"\"\"\n with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \\\n patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \\\n patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \\\n patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:\n\n mock_llm = MagicMock()\n mock_llm_factory.return_value = mock_llm\n\n mock_embedder = MagicMock()\n mock_embedder.embed.return_value = [0.1, 0.2, 0.3]\n mock_embedder_factory.return_value = mock_embedder\n\n mock_vector_store = MagicMock()\n mock_vector_store.search.return_value = []\n mock_vector_store.add.return_value = None\n mock_vector_factory.return_value = mock_vector_store\n\n mock_sqlite.return_value = MagicMock()\n\n memory = Memory()\n memory.api_version = \"v1.0\"\n return memory, mock_llm, mock_vector_store\n\n\ndef 
create_mocked_async_memory():\n \"\"\"Create a fully mocked AsyncMemory instance for testing.\"\"\"\n with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \\\n patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \\\n patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \\\n patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:\n\n mock_llm = MagicMock()\n mock_llm_factory.return_value = mock_llm\n\n mock_embedder = MagicMock()\n mock_embedder.embed.return_value = [0.1, 0.2, 0.3]\n mock_embedder_factory.return_value = mock_embedder\n\n mock_vector_store = MagicMock()\n mock_vector_store.search.return_value = []\n mock_vector_store.add.return_value = None\n mock_vector_factory.return_value = mock_vector_store\n\n mock_sqlite.return_value = MagicMock()\n\n memory = AsyncMemory()\n memory.api_version = \"v1.0\"\n return memory, mock_llm, mock_vector_store\n\n\ndef test_thinking_tags_sync():\n \"\"\"Test thinking tags handling in Memory._add_to_vector_store (sync).\"\"\"\n memory, mock_llm, mock_vector_store = create_mocked_memory()\n \n # Mock LLM responses for both phases\n mock_llm.generate_response.side_effect = [\n ' <think>Sync fact extraction</think> \\n{\"facts\": [\"User loves sci-fi\"]}',\n ' <think>Sync memory actions</think> \\n{\"memory\": [{\"text\": \"Loves sci-fi\", \"event\": \"ADD\"}]}'\n ]\n \n mock_vector_store.search.return_value = []\n \n result = memory._add_to_vector_store(\n messages=[{\"role\": \"user\", \"content\": \"I love sci-fi movies\"}],\n metadata={}, \n filters={}, \n infer=True\n )\n \n assert len(result) == 1\n assert result[0][\"memory\"] == \"Loves sci-fi\"\n assert result[0][\"event\"] == \"ADD\"\n\n\n\n@pytest.mark.asyncio\nasync def test_async_thinking_tags_async():\n \"\"\"Test thinking tags handling in AsyncMemory._add_to_vector_store.\"\"\"\n memory, mock_llm, mock_vector_store = create_mocked_async_memory()\n \n # Directly mock llm.generate_response 
instead of via asyncio.to_thread\n mock_llm.generate_response.side_effect = [\n ' <think>Async fact extraction</think> \\n{\"facts\": [\"User loves sci-fi\"]}',\n ' <think>Async memory actions</think> \\n{\"memory\": [{\"text\": \"Loves sci-fi\", \"event\": \"ADD\"}]}'\n ]\n \n # Mock asyncio.to_thread to call the function directly (bypass threading)\n async def mock_to_thread(func, *args, **kwargs):\n if func == mock_llm.generate_response:\n return func(*args, **kwargs)\n elif hasattr(func, '__name__') and 'embed' in func.__name__:\n return [0.1, 0.2, 0.3]\n elif hasattr(func, '__name__') and 'search' in func.__name__:\n return []\n else:\n return func(*args, **kwargs)\n \n with patch('mem0.memory.main.asyncio.to_thread', side_effect=mock_to_thread):\n result = await memory._add_to_vector_store(\n messages=[{\"role\": \"user\", \"content\": \"I love sci-fi movies\"}],\n metadata={}, \n effective_filters={}, \n infer=True\n )\n \n assert len(result) == 1\n assert result[0][\"memory\"] == \"Loves sci-fi\"\n assert result[0][\"event\"] == \"ADD\"", "framework": "pytest", "test_command": "pytest tests/llms/test_vllm.py::test_generate_response_without_tools -xvs"}, {"test_file": "tests/llms/test_vllm.py", "test_function": "test_generate_response_with_tools", "test_content": "from unittest.mock import MagicMock, Mock, patch\n\nimport pytest\n\nfrom mem0 import AsyncMemory, Memory\nfrom mem0.configs.llms.base import BaseLlmConfig\nfrom mem0.llms.vllm import VllmLLM\n\n\n@pytest.fixture\ndef mock_vllm_client():\n with patch(\"mem0.llms.vllm.OpenAI\") as mock_openai:\n mock_client = Mock()\n mock_openai.return_value = mock_client\n yield mock_client\n\n\ndef test_generate_response_without_tools(mock_vllm_client):\n config = BaseLlmConfig(model=\"Qwen/Qwen2.5-32B-Instruct\", temperature=0.7, max_tokens=100, top_p=1.0)\n llm = VllmLLM(config)\n messages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Hello, 
how are you?\"},\n ]\n\n mock_response = Mock()\n mock_response.choices = [Mock(message=Mock(content=\"I'm doing well, thank you for asking!\"))]\n mock_vllm_client.chat.completions.create.return_value = mock_response\n\n response = llm.generate_response(messages)\n\n mock_vllm_client.chat.completions.create.assert_called_once_with(\n model=\"Qwen/Qwen2.5-32B-Instruct\", messages=messages, temperature=0.7, max_tokens=100, top_p=1.0\n )\n assert response == \"I'm doing well, thank you for asking!\"\n\n\ndef test_generate_response_with_tools(mock_vllm_client):\n config = BaseLlmConfig(model=\"Qwen/Qwen2.5-32B-Instruct\", temperature=0.7, max_tokens=100, top_p=1.0)\n llm = VllmLLM(config)\n messages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"Add a new memory: Today is a sunny day.\"},\n ]\n tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"add_memory\",\n \"description\": \"Add a memory\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\"data\": {\"type\": \"string\", \"description\": \"Data to add to memory\"}},\n \"required\": [\"data\"],\n },\n },\n }\n ]\n\n mock_response = Mock()\n mock_message = Mock()\n mock_message.content = \"I've added the memory for you.\"\n\n mock_tool_call = Mock()\n mock_tool_call.function.name = \"add_memory\"\n mock_tool_call.function.arguments = '{\"data\": \"Today is a sunny day.\"}'\n\n mock_message.tool_calls = [mock_tool_call]\n mock_response.choices = [Mock(message=mock_message)]\n mock_vllm_client.chat.completions.create.return_value = mock_response\n\n response = llm.generate_response(messages, tools=tools)\n\n mock_vllm_client.chat.completions.create.assert_called_once_with(\n model=\"Qwen/Qwen2.5-32B-Instruct\",\n messages=messages,\n temperature=0.7,\n max_tokens=100,\n top_p=1.0,\n tools=tools,\n tool_choice=\"auto\",\n )\n\n assert response[\"content\"] == \"I've added the memory for you.\"\n assert 
len(response[\"tool_calls\"]) == 1\n assert response[\"tool_calls\"][0][\"name\"] == \"add_memory\"\n assert response[\"tool_calls\"][0][\"arguments\"] == {\"data\": \"Today is a sunny day.\"}\n\n\n\ndef create_mocked_memory():\n \"\"\"Create a fully mocked Memory instance for testing.\"\"\"\n with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \\\n patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \\\n patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \\\n patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:\n\n mock_llm = MagicMock()\n mock_llm_factory.return_value = mock_llm\n\n mock_embedder = MagicMock()\n mock_embedder.embed.return_value = [0.1, 0.2, 0.3]\n mock_embedder_factory.return_value = mock_embedder\n\n mock_vector_store = MagicMock()\n mock_vector_store.search.return_value = []\n mock_vector_store.add.return_value = None\n mock_vector_factory.return_value = mock_vector_store\n\n mock_sqlite.return_value = MagicMock()\n\n memory = Memory()\n memory.api_version = \"v1.0\"\n return memory, mock_llm, mock_vector_store\n\n\ndef create_mocked_async_memory():\n \"\"\"Create a fully mocked AsyncMemory instance for testing.\"\"\"\n with patch('mem0.utils.factory.LlmFactory.create') as mock_llm_factory, \\\n patch('mem0.utils.factory.EmbedderFactory.create') as mock_embedder_factory, \\\n patch('mem0.utils.factory.VectorStoreFactory.create') as mock_vector_factory, \\\n patch('mem0.memory.storage.SQLiteManager') as mock_sqlite:\n\n mock_llm = MagicMock()\n mock_llm_factory.return_value = mock_llm\n\n mock_embedder = MagicMock()\n mock_embedder.embed.return_value = [0.1, 0.2, 0.3]\n mock_embedder_factory.return_value = mock_embedder\n\n mock_vector_store = MagicMock()\n mock_vector_store.search.return_value = []\n mock_vector_store.add.return_value = None\n mock_vector_factory.return_value = mock_vector_store\n\n mock_sqlite.return_value = MagicMock()\n\n memory = AsyncMemory()\n 
memory.api_version = \"v1.0\"\n return memory, mock_llm, mock_vector_store\n\n\ndef test_thinking_tags_sync():\n \"\"\"Test thinking tags handling in Memory._add_to_vector_store (sync).\"\"\"\n memory, mock_llm, mock_vector_store = create_mocked_memory()\n \n # Mock LLM responses for both phases\n mock_llm.generate_response.side_effect = [\n ' <think>Sync fact extraction</think> \\n{\"facts\": [\"User loves sci-fi\"]}',\n ' <think>Sync memory actions</think> \\n{\"memory\": [{\"text\": \"Loves sci-fi\", \"event\": \"ADD\"}]}'\n ]\n \n mock_vector_store.search.return_value = []\n \n result = memory._add_to_vector_store(\n messages=[{\"role\": \"user\", \"content\": \"I love sci-fi movies\"}],\n metadata={}, \n filters={}, \n infer=True\n )\n \n assert len(result) == 1\n assert result[0][\"memory\"] == \"Loves sci-fi\"\n assert result[0][\"event\"] == \"ADD\"\n\n\n\n@pytest.mark.asyncio\nasync def test_async_thinking_tags_async():\n \"\"\"Test thinking tags handling in AsyncMemory._add_to_vector_store.\"\"\"\n memory, mock_llm, mock_vector_store = create_mocked_async_memory()\n \n # Directly mock llm.generate_response instead of via asyncio.to_thread\n mock_llm.generate_response.side_effect = [\n ' <think>Async fact extraction</think> \\n{\"facts\": [\"User loves sci-fi\"]}',\n ' <think>Async memory actions</think> \\n{\"memory\": [{\"text\": \"Loves sci-fi\", \"event\": \"ADD\"}]}'\n ]\n \n # Mock asyncio.to_thread to call the function directly (bypass threading)\n async def mock_to_thread(func, *args, **kwargs):\n if func == mock_llm.generate_response:\n return func(*args, **kwargs)\n elif hasattr(func, '__name__') and 'embed' in func.__name__:\n return [0.1, 0.2, 0.3]\n elif hasattr(func, '__name__') and 'search' in func.__name__:\n return []\n else:\n return func(*args, **kwargs)\n \n with patch('mem0.memory.main.asyncio.to_thread', side_effect=mock_to_thread):\n result = await memory._add_to_vector_store(\n messages=[{\"role\": \"user\", \"content\": \"I love 
sci-fi movies\"}],\n metadata={}, \n effective_filters={}, \n infer=True\n )\n \n assert len(result) == 1\n assert result[0][\"memory\"] == \"Loves sci-fi\"\n assert result[0][\"event\"] == \"ADD\"", "framework": "pytest", "test_command": "pytest tests/llms/test_vllm.py::test_generate_response_with_tools -xvs"}] | {"repo_url": "https://github.com/mem0ai/mem0", "install_cmd": "pip install -e .", "commit_sha": "a0d8a02b948271a2b369f7d65f28805189a22970", "frozen_requirements": "frozen_requirements/mem0ai_mem0.txt"} | {"body_lines": 12, "file_lines": 108, "has_docstring": true, "num_tests": 2} | {"status": "passed", "tests_run": 2} | repo_patch/0048 | clean |
repo_patch/0033 | scrapy/scrapy | tests/utils/cmdline.py | call | call | function | null | from __future__ import annotations
import subprocess
import sys
from typing import Any
import pytest
from scrapy.utils.test import get_testenv
def call(*args: str, **popen_kwargs: Any) -> int:
# TODO: Implement this function
def proc(*args: str, **popen_kwargs: Any) -> tuple[int, str, str]:
args = (sys.executable, "-m", "scrapy.cmdline", *args)
try:
p = subprocess.run(
args,
check=False,
capture_output=True,
encoding="utf-8",
timeout=15,
env=get_testenv(),
**popen_kwargs,
)
except subprocess.TimeoutExpired:
pytest.fail("Command took too much time to complete")
return p.returncode, p.stdout, p.stderr | def call(*args: str, **popen_kwargs: Any) -> int: | args = (sys.executable, "-m", "scrapy.cmdline", *args)
return subprocess.call(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
env=get_testenv(),
**popen_kwargs,
) | def call(*args: str, **popen_kwargs: Any) -> int:
args = (sys.executable, "-m", "scrapy.cmdline", *args)
return subprocess.call(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
env=get_testenv(),
**popen_kwargs,
) | [{"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_crawlspider_not_exists_with_not_matched_url", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = 
\"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return 
[scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, 
_, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert 
\"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def 
test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = 
\"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_crawlspider_not_exists_with_not_matched_url -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_arguments", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. 
spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert 
call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n 
(\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, 
\"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_arguments -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_list", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / 
self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = 
proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n 
@pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = 
Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_list -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_dump", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def 
test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, 
proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not 
None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: 
Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_dump -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_same_name_as_project", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n 
return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n 
def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, 
r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, 
False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_same_name_as_project -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_same_filename_as_existing_spider", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", 
encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / 
\"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, 
r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", 
\"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_same_filename_as_existing_spider -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_url", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find 
first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert 
call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / 
self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def 
test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_url -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_template_start_urls", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, 
proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 
0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", 
\"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == 
expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_template_start_urls -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderStandaloneCommand.test_generate_standalone_spider", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport 
pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", 
\"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n 
],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = 
find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderStandaloneCommand::test_generate_standalone_spider -xvs"}, {"test_file": "tests/test_engine.py", "test_function": 
"TestEngineDownloadAsync.test_download_async_redirect", "test_content": "from __future__ import annotations\n\nimport asyncio\nimport re\nimport subprocess\nimport sys\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom logging import DEBUG\nfrom typing import TYPE_CHECKING, cast\nfrom unittest.mock import Mock, call\nfrom urllib.parse import urlparse\n\nimport attr\nimport pytest\nfrom itemadapter import ItemAdapter\nfrom pydispatch import dispatcher\nfrom testfixtures import LogCapture\nfrom twisted.internet import defer\n\nfrom scrapy import signals\nfrom scrapy.core.engine import ExecutionEngine, _Slot\nfrom scrapy.core.scheduler import BaseScheduler\nfrom scrapy.exceptions import CloseSpider, IgnoreRequest\nfrom scrapy.http import Request, Response\nfrom scrapy.item import Field, Item\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Spider\nfrom scrapy.utils.defer import (\n _schedule_coro,\n deferred_from_coro,\n maybe_deferred_to_future,\n)\nfrom scrapy.utils.signal import disconnect_all\nfrom scrapy.utils.spider import DefaultSpider\nfrom scrapy.utils.test import get_crawler\nfrom tests import get_testdata\nfrom tests.utils.decorators import coroutine_test, inline_callbacks_test\n\nif TYPE_CHECKING:\n from scrapy.core.scheduler import Scheduler\n from scrapy.crawler import Crawler\n from scrapy.statscollectors import MemoryStatsCollector\n from tests.mockserver.http import MockServer\n\n\nclass MyItem(Item):\n name = Field()\n url = Field()\n price = Field()\n\n\n@attr.s\nclass AttrsItem:\n name = attr.ib(default=\"\")\n url = attr.ib(default=\"\")\n price = attr.ib(default=0)\n\n\n@dataclass\nclass DataClassItem:\n name: str = \"\"\n url: str = \"\"\n price: int = 0\n\n\nclass MySpider(Spider):\n name = \"scrapytest.org\"\n\n itemurl_re = re.compile(r\"item\\d+.html\")\n name_re = re.compile(r\"<h1>(.*?)</h1>\", re.MULTILINE)\n price_re = re.compile(r\">Price: \\$(.*?)<\", re.MULTILINE)\n\n item_cls: 
type = MyItem\n\n def parse(self, response):\n xlink = LinkExtractor()\n itemre = re.compile(self.itemurl_re)\n for link in xlink.extract_links(response):\n if itemre.search(link.url):\n yield Request(url=link.url, callback=self.parse_item)\n\n def parse_item(self, response):\n adapter = ItemAdapter(self.item_cls())\n m = self.name_re.search(response.text)\n if m:\n adapter[\"name\"] = m.group(1)\n adapter[\"url\"] = response.url\n m = self.price_re.search(response.text)\n if m:\n adapter[\"price\"] = m.group(1)\n return adapter.item\n\n\nclass DupeFilterSpider(MySpider):\n async def start(self):\n for url in self.start_urls:\n yield Request(url) # no dont_filter=True\n\n\nclass DictItemsSpider(MySpider):\n item_cls = dict\n\n\nclass AttrsItemsSpider(MySpider):\n item_cls = AttrsItem\n\n\nclass DataClassItemsSpider(MySpider):\n item_cls = DataClassItem\n\n\nclass ItemZeroDivisionErrorSpider(MySpider):\n custom_settings = {\n \"ITEM_PIPELINES\": {\n \"tests.pipelines.ProcessWithZeroDivisionErrorPipeline\": 300,\n }\n }\n\n\nclass ChangeCloseReasonSpider(MySpider):\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n crawler.signals.connect(spider.spider_idle, signals.spider_idle)\n return spider\n\n def spider_idle(self):\n raise CloseSpider(reason=\"custom_reason\")\n\n\nclass CrawlerRun:\n \"\"\"A class to run the crawler and keep track of events occurred\"\"\"\n\n def __init__(self, spider_class):\n self.respplug = []\n self.reqplug = []\n self.reqdropped = []\n self.reqreached = []\n self.itemerror = []\n self.itemresp = []\n self.headers = {}\n self.bytes = defaultdict(list)\n self.signals_caught = {}\n self.spider_class = spider_class\n\n async def run(self, mockserver: MockServer) -> None:\n self.mockserver = mockserver\n\n start_urls = [\n self.geturl(\"/static/\"),\n self.geturl(\"/redirect\"),\n self.geturl(\"/redirect\"), # duplicate\n self.geturl(\"/numbers\"),\n ]\n\n for 
name, signal in vars(signals).items():\n if not name.startswith(\"_\"):\n dispatcher.connect(self.record_signal, signal)\n\n self.crawler = get_crawler(self.spider_class)\n self.crawler.signals.connect(self.item_scraped, signals.item_scraped)\n self.crawler.signals.connect(self.item_error, signals.item_error)\n self.crawler.signals.connect(self.headers_received, signals.headers_received)\n self.crawler.signals.connect(self.bytes_received, signals.bytes_received)\n self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)\n self.crawler.signals.connect(self.request_dropped, signals.request_dropped)\n self.crawler.signals.connect(\n self.request_reached, signals.request_reached_downloader\n )\n self.crawler.signals.connect(\n self.response_downloaded, signals.response_downloaded\n )\n self.crawler.crawl(start_urls=start_urls)\n\n self.deferred: defer.Deferred[None] = defer.Deferred()\n dispatcher.connect(self.stop, signals.engine_stopped)\n await maybe_deferred_to_future(self.deferred)\n\n async def stop(self):\n for name, signal in vars(signals).items():\n if not name.startswith(\"_\"):\n disconnect_all(signal)\n self.deferred.callback(None)\n await self.crawler.stop_async()\n\n def geturl(self, path: str) -> str:\n return self.mockserver.url(path)\n\n def getpath(self, url):\n u = urlparse(url)\n return u.path\n\n def item_error(self, item, response, spider, failure):\n self.itemerror.append((item, response, spider, failure))\n\n def item_scraped(self, item, spider, response):\n self.itemresp.append((item, response))\n\n def headers_received(self, headers, body_length, request, spider):\n self.headers[request] = headers\n\n def bytes_received(self, data, request, spider):\n self.bytes[request].append(data)\n\n def request_scheduled(self, request, spider):\n self.reqplug.append((request, spider))\n\n def request_reached(self, request, spider):\n self.reqreached.append((request, spider))\n\n def request_dropped(self, request, spider):\n 
self.reqdropped.append((request, spider))\n\n def response_downloaded(self, response, spider):\n self.respplug.append((response, spider))\n\n def record_signal(self, *args, **kwargs):\n \"\"\"Record a signal and its parameters\"\"\"\n signalargs = kwargs.copy()\n sig = signalargs.pop(\"signal\")\n signalargs.pop(\"sender\", None)\n self.signals_caught[sig] = signalargs\n\n\nclass TestEngineBase:\n @staticmethod\n def _assert_visited_urls(run: CrawlerRun) -> None:\n must_be_visited = [\n \"/static/\",\n \"/redirect\",\n \"/redirected\",\n \"/static/item1.html\",\n \"/static/item2.html\",\n \"/static/item999.html\",\n ]\n urls_visited = {rp[0].url for rp in run.respplug}\n urls_expected = {run.geturl(p) for p in must_be_visited}\n assert urls_expected <= urls_visited, (\n f\"URLs not visited: {list(urls_expected - urls_visited)}\"\n )\n\n @staticmethod\n def _assert_scheduled_requests(run: CrawlerRun, count: int) -> None:\n assert len(run.reqplug) == count\n\n paths_expected = [\n \"/static/item999.html\",\n \"/static/item2.html\",\n \"/static/item1.html\",\n ]\n\n urls_requested = {rq[0].url for rq in run.reqplug}\n urls_expected = {run.geturl(p) for p in paths_expected}\n assert urls_expected <= urls_requested\n scheduled_requests_count = len(run.reqplug)\n dropped_requests_count = len(run.reqdropped)\n responses_count = len(run.respplug)\n assert scheduled_requests_count == dropped_requests_count + responses_count\n assert len(run.reqreached) == responses_count\n\n @staticmethod\n def _assert_dropped_requests(run: CrawlerRun) -> None:\n assert len(run.reqdropped) == 1\n\n @staticmethod\n def _assert_downloaded_responses(run: CrawlerRun, count: int) -> None:\n # response tests\n assert len(run.respplug) == count\n assert len(run.reqreached) == count\n\n for response, _ in run.respplug:\n if run.getpath(response.url) == \"/static/item999.html\":\n assert response.status == 404\n if run.getpath(response.url) == \"/redirect\":\n assert response.status == 302\n\n 
@staticmethod\n def _assert_items_error(run: CrawlerRun) -> None:\n assert len(run.itemerror) == 2\n for item, response, spider, failure in run.itemerror:\n assert failure.value.__class__ is ZeroDivisionError\n assert spider == run.crawler.spider\n\n assert item[\"url\"] == response.url\n if \"item1.html\" in item[\"url\"]:\n assert item[\"name\"] == \"Item 1 name\"\n assert item[\"price\"] == \"100\"\n if \"item2.html\" in item[\"url\"]:\n assert item[\"name\"] == \"Item 2 name\"\n assert item[\"price\"] == \"200\"\n\n @staticmethod\n def _assert_scraped_items(run: CrawlerRun) -> None:\n assert len(run.itemresp) == 2\n for item, response in run.itemresp:\n item = ItemAdapter(item)\n assert item[\"url\"] == response.url\n if \"item1.html\" in item[\"url\"]:\n assert item[\"name\"] == \"Item 1 name\"\n assert item[\"price\"] == \"100\"\n if \"item2.html\" in item[\"url\"]:\n assert item[\"name\"] == \"Item 2 name\"\n assert item[\"price\"] == \"200\"\n\n @staticmethod\n def _assert_headers_received(run: CrawlerRun) -> None:\n for headers in run.headers.values():\n assert b\"Server\" in headers\n assert b\"TwistedWeb\" in headers[b\"Server\"]\n assert b\"Date\" in headers\n assert b\"Content-Type\" in headers\n\n @staticmethod\n def _assert_bytes_received(run: CrawlerRun) -> None:\n assert len(run.bytes) == 9\n for request, data in run.bytes.items():\n joined_data = b\"\".join(data)\n if run.getpath(request.url) == \"/static/\":\n assert joined_data == get_testdata(\"test_site\", \"index.html\")\n elif run.getpath(request.url) == \"/static/item1.html\":\n assert joined_data == get_testdata(\"test_site\", \"item1.html\")\n elif run.getpath(request.url) == \"/static/item2.html\":\n assert joined_data == get_testdata(\"test_site\", \"item2.html\")\n elif run.getpath(request.url) == \"/redirected\":\n assert joined_data == b\"Redirected here\"\n elif run.getpath(request.url) == \"/redirect\":\n assert (\n joined_data == b\"\\n<html>\\n\"\n b\" <head>\\n\"\n b' <meta 
http-equiv=\"refresh\" content=\"0;URL=/redirected\">\\n'\n b\" </head>\\n\"\n b' <body bgcolor=\"#FFFFFF\" text=\"#000000\">\\n'\n b' <a href=\"/redirected\">click here</a>\\n'\n b\" </body>\\n\"\n b\"</html>\\n\"\n )\n elif run.getpath(request.url) == \"/static/item999.html\":\n assert (\n joined_data == b\"\\n<html>\\n\"\n b\" <head><title>404 - No Such Resource</title></head>\\n\"\n b\" <body>\\n\"\n b\" <h1>No Such Resource</h1>\\n\"\n b\" <p>File not found.</p>\\n\"\n b\" </body>\\n\"\n b\"</html>\\n\"\n )\n elif run.getpath(request.url) == \"/numbers\":\n # signal was fired multiple times\n assert len(data) > 1\n # bytes were received in order\n numbers = [str(x).encode(\"utf8\") for x in range(2**18)]\n assert joined_data == b\"\".join(numbers)\n\n @staticmethod\n def _assert_signals_caught(run: CrawlerRun) -> None:\n assert signals.engine_started in run.signals_caught\n assert signals.engine_stopped in run.signals_caught\n assert signals.spider_opened in run.signals_caught\n assert signals.spider_idle in run.signals_caught\n assert signals.spider_closed in run.signals_caught\n assert signals.headers_received in run.signals_caught\n\n assert {\"spider\": run.crawler.spider} == run.signals_caught[\n signals.spider_opened\n ]\n assert {\"spider\": run.crawler.spider} == run.signals_caught[signals.spider_idle]\n assert {\n \"spider\": run.crawler.spider,\n \"reason\": \"finished\",\n } == run.signals_caught[signals.spider_closed]\n\n\nclass TestEngine(TestEngineBase):\n @coroutine_test\n async def test_crawler(self, mockserver: MockServer) -> None:\n for spider in (\n MySpider,\n DictItemsSpider,\n AttrsItemsSpider,\n DataClassItemsSpider,\n ):\n run = CrawlerRun(spider)\n await run.run(mockserver)\n self._assert_visited_urls(run)\n self._assert_scheduled_requests(run, count=9)\n self._assert_downloaded_responses(run, count=9)\n self._assert_scraped_items(run)\n self._assert_signals_caught(run)\n self._assert_bytes_received(run)\n\n @coroutine_test\n async def 
test_crawler_dupefilter(self, mockserver: MockServer) -> None:\n run = CrawlerRun(DupeFilterSpider)\n await run.run(mockserver)\n self._assert_scheduled_requests(run, count=8)\n self._assert_dropped_requests(run)\n\n @coroutine_test\n async def test_crawler_itemerror(self, mockserver: MockServer) -> None:\n run = CrawlerRun(ItemZeroDivisionErrorSpider)\n await run.run(mockserver)\n self._assert_items_error(run)\n\n @coroutine_test\n async def test_crawler_change_close_reason_on_idle(\n self, mockserver: MockServer\n ) -> None:\n run = CrawlerRun(ChangeCloseReasonSpider)\n await run.run(mockserver)\n assert {\n \"spider\": run.crawler.spider,\n \"reason\": \"custom_reason\",\n } == run.signals_caught[signals.spider_closed]\n\n @coroutine_test\n async def test_close_downloader(self):\n e = ExecutionEngine(get_crawler(MySpider), lambda _: None)\n await e.close_async()\n\n def test_close_without_downloader(self):\n class CustomException(Exception):\n pass\n\n class BadDownloader:\n def __init__(self, crawler):\n raise CustomException\n\n with pytest.raises(CustomException):\n ExecutionEngine(\n get_crawler(MySpider, {\"DOWNLOADER\": BadDownloader}), lambda _: None\n )\n\n @inline_callbacks_test\n def test_start_already_running_exception(self):\n crawler = get_crawler(DefaultSpider)\n crawler.spider = crawler._create_spider()\n e = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = e\n yield deferred_from_coro(e.open_spider_async())\n _schedule_coro(e.start_async())\n with pytest.raises(RuntimeError, match=\"Engine already running\"):\n yield deferred_from_coro(e.start_async())\n yield deferred_from_coro(e.stop_async())\n\n @pytest.mark.only_asyncio\n @coroutine_test\n async def test_start_already_running_exception_asyncio(self):\n crawler = get_crawler(DefaultSpider)\n crawler.spider = crawler._create_spider()\n e = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = e\n await e.open_spider_async()\n with pytest.raises(RuntimeError, match=\"Engine 
already running\"):\n await asyncio.gather(e.start_async(), e.start_async())\n await e.stop_async()\n\n @inline_callbacks_test\n def test_start_request_processing_exception(self):\n class BadRequestFingerprinter:\n def fingerprint(self, request):\n raise ValueError # to make Scheduler.enqueue_request() fail\n\n class SimpleSpider(Spider):\n name = \"simple\"\n\n async def start(self):\n yield Request(\"data:,\")\n\n crawler = get_crawler(\n SimpleSpider, {\"REQUEST_FINGERPRINTER_CLASS\": BadRequestFingerprinter}\n )\n with LogCapture() as log:\n yield crawler.crawl()\n assert \"Error while processing requests from start()\" in str(log)\n assert \"Spider closed (shutdown)\" in str(log)\n\n def test_short_timeout(self):\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"fetch\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.001\",\n \"-s\",\n \"LOG_LEVEL=DEBUG\",\n \"http://toscrape.com\",\n )\n p = subprocess.Popen(\n args,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n )\n\n try:\n _, stderr = p.communicate(timeout=15)\n except subprocess.TimeoutExpired:\n p.kill()\n p.communicate()\n pytest.fail(\"Command took too much time to complete\")\n\n stderr_str = stderr.decode(\"utf-8\")\n assert \"AttributeError\" not in stderr_str, stderr_str\n assert \"AssertionError\" not in stderr_str, stderr_str\n\n\nclass TestEngineDownloadAsync:\n \"\"\"Test cases for ExecutionEngine.download_async().\"\"\"\n\n @pytest.fixture\n def engine(self) -> ExecutionEngine:\n crawler = get_crawler(MySpider)\n engine = ExecutionEngine(crawler, lambda _: None)\n engine.downloader.close()\n engine.downloader = Mock()\n engine._slot = Mock()\n engine._slot.inprogress = set()\n return engine\n\n @staticmethod\n async def _download(engine: ExecutionEngine, request: Request) -> Response:\n return await engine.download_async(request)\n\n @coroutine_test\n async def test_download_async_success(self, engine):\n \"\"\"Test basic successful async download of a request.\"\"\"\n request = 
Request(\"http://example.com\")\n response = Response(\"http://example.com\", body=b\"test body\")\n engine.spider = Mock()\n engine.downloader.fetch.return_value = defer.succeed(response)\n engine._slot.add_request = Mock()\n engine._slot.remove_request = Mock()\n\n result = await self._download(engine, request)\n assert result == response\n engine._slot.add_request.assert_called_once_with(request)\n engine._slot.remove_request.assert_called_once_with(request)\n engine.downloader.fetch.assert_called_once_with(request)\n\n @coroutine_test\n async def test_download_async_redirect(self, engine):\n \"\"\"Test async download with a redirect request.\"\"\"\n original_request = Request(\"http://example.com\")\n redirect_request = Request(\"http://example.com/redirect\")\n final_response = Response(\"http://example.com/redirect\", body=b\"redirected\")\n\n # First call returns redirect request, second call returns final response\n engine.downloader.fetch.side_effect = [\n defer.succeed(redirect_request),\n defer.succeed(final_response),\n ]\n engine.spider = Mock()\n engine._slot.add_request = Mock()\n engine._slot.remove_request = Mock()\n\n result = await self._download(engine, original_request)\n assert result == final_response\n assert engine.downloader.fetch.call_count == 2\n engine._slot.add_request.assert_has_calls(\n [call(original_request), call(redirect_request)]\n )\n engine._slot.remove_request.assert_has_calls(\n [call(original_request), call(redirect_request)]\n )\n\n @coroutine_test\n async def test_download_async_no_spider(self, engine):\n \"\"\"Test async download attempt when no spider is available.\"\"\"\n request = Request(\"http://example.com\")\n engine.spider = None\n with pytest.raises(RuntimeError, match=\"No open spider to crawl:\"):\n await self._download(engine, request)\n\n @coroutine_test\n async def test_download_async_failure(self, engine):\n \"\"\"Test async download when the downloader raises an exception.\"\"\"\n request = 
Request(\"http://example.com\")\n error = RuntimeError(\"Download failed\")\n engine.spider = Mock()\n engine.downloader.fetch.return_value = defer.fail(error)\n engine._slot.add_request = Mock()\n engine._slot.remove_request = Mock()\n\n with pytest.raises(RuntimeError, match=\"Download failed\"):\n await self._download(engine, request)\n engine._slot.add_request.assert_called_once_with(request)\n engine._slot.remove_request.assert_called_once_with(request)\n\n\n@pytest.mark.filterwarnings(\"ignore::scrapy.exceptions.ScrapyDeprecationWarning\")\nclass TestEngineDownload(TestEngineDownloadAsync):\n \"\"\"Test cases for ExecutionEngine.download().\"\"\"\n\n @staticmethod\n async def _download(engine: ExecutionEngine, request: Request) -> Response:\n return await maybe_deferred_to_future(engine.download(request))\n\n\n@pytest.mark.requires_reactor # needs a reactor or an event loop for _Slot.heartbeat\ndef test_request_scheduled_signal(caplog):\n class TestScheduler(BaseScheduler):\n def __init__(self):\n self.enqueued = []\n\n def enqueue_request(self, request: Request) -> bool:\n self.enqueued.append(request)\n return True\n\n def signal_handler(request: Request, spider: Spider) -> None:\n if \"drop\" in request.url:\n raise IgnoreRequest\n\n crawler = get_crawler(MySpider)\n engine = ExecutionEngine(crawler, lambda _: None)\n scheduler = TestScheduler()\n\n async def start():\n return\n yield\n\n engine._start = start()\n engine._slot = _Slot(False, Mock(), scheduler)\n crawler.signals.connect(signal_handler, signals.request_scheduled)\n keep_request = Request(\"https://keep.example\")\n engine._schedule_request(keep_request)\n drop_request = Request(\"https://drop.example\")\n caplog.set_level(DEBUG)\n engine._schedule_request(drop_request)\n assert scheduler.enqueued == [keep_request], (\n f\"{scheduler.enqueued!r} != [{keep_request!r}]\"\n )\n crawler.signals.disconnect(signal_handler, signals.request_scheduled)\n\n\nclass TestEngineCloseSpider:\n \"\"\"Tests 
for exception handling coverage during close_spider_async().\"\"\"\n\n @pytest.fixture\n def crawler(self) -> Crawler:\n crawler = get_crawler(DefaultSpider)\n crawler.spider = crawler._create_spider()\n return crawler\n\n @coroutine_test\n async def test_no_slot(self, crawler: Crawler) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n slot = engine._slot\n engine._slot = None\n with pytest.raises(RuntimeError, match=\"Engine slot not assigned\"):\n await engine.close_spider_async()\n # close it correctly\n engine._slot = slot\n await engine.close_spider_async()\n\n @coroutine_test\n async def test_no_spider(self, crawler: Crawler) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n with pytest.raises(RuntimeError, match=\"Spider not opened\"):\n await engine.close_spider_async()\n engine.downloader.close() # cleanup\n\n @coroutine_test\n async def test_exception_slot(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n assert engine._slot\n del engine._slot.heartbeat\n await engine.close_spider_async()\n assert \"Slot close failure\" in caplog.text\n\n @coroutine_test\n async def test_exception_downloader(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n del engine.downloader.slots\n await engine.close_spider_async()\n assert \"Downloader close failure\" in caplog.text\n\n @coroutine_test\n async def test_exception_scraper(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n engine.scraper.slot = None\n await engine.close_spider_async()\n assert \"Scraper close failure\" in 
caplog.text\n\n @coroutine_test\n async def test_exception_scheduler(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n assert engine._slot\n del cast(\"Scheduler\", engine._slot.scheduler).dqs\n await engine.close_spider_async()\n assert \"Scheduler close failure\" in caplog.text\n\n @coroutine_test\n async def test_exception_signal(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n signal_manager = engine.signals\n del engine.signals\n await engine.close_spider_async()\n assert \"Error while sending spider_close signal\" in caplog.text\n # send the spider_closed signal to close various components\n await signal_manager.send_catch_log_async(\n signal=signals.spider_closed,\n spider=engine.spider,\n reason=\"cancelled\",\n )\n\n @coroutine_test\n async def test_exception_stats(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: None)\n crawler.engine = engine\n await engine.open_spider_async()\n del cast(\"MemoryStatsCollector\", crawler.stats).spider_stats\n await engine.close_spider_async()\n assert \"Stats close failure\" in caplog.text\n\n @coroutine_test\n async def test_exception_callback(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n engine = ExecutionEngine(crawler, lambda _: defer.fail(ValueError()))\n crawler.engine = engine\n await engine.open_spider_async()\n await engine.close_spider_async()\n assert \"Error running spider_closed_callback\" in caplog.text\n\n @coroutine_test\n async def test_exception_async_callback(\n self, crawler: Crawler, caplog: pytest.LogCaptureFixture\n ) -> None:\n async def cb(_):\n raise ValueError\n\n engine = ExecutionEngine(crawler, cb)\n crawler.engine = 
engine\n await engine.open_spider_async()\n await engine.close_spider_async()\n assert \"Error running spider_closed_callback\" in caplog.text\n", "framework": "pytest", "test_command": "pytest tests/test_engine.py::TestEngineDownloadAsync::test_download_async_redirect -xvs"}, {"test_file": "tests/test_commands.py", "test_function": "TestMiscCommands.test_list", "test_content": "from __future__ import annotations\n\nimport argparse\nimport json\nfrom io import StringIO\nfrom shutil import copytree\nfrom typing import TYPE_CHECKING\nfrom unittest import mock\n\nimport pytest\n\nimport scrapy\nfrom scrapy.cmdline import _pop_command_name, _print_unknown_command_msg\nfrom scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view\nfrom scrapy.settings import Settings\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass EmptyCommand(ScrapyCommand):\n def short_desc(self) -> str:\n return \"\"\n\n def run(self, args: list[str], opts: argparse.Namespace) -> None:\n pass\n\n\nclass TestCommandSettings:\n def setup_method(self):\n self.command = EmptyCommand()\n self.command.settings = Settings()\n self.parser = argparse.ArgumentParser(\n formatter_class=ScrapyHelpFormatter, conflict_handler=\"resolve\"\n )\n self.command.add_options(self.parser)\n\n def test_settings_json_string(self):\n feeds_json = '{\"data.json\": {\"format\": \"json\"}, \"data.xml\": {\"format\": \"xml\"}}'\n opts, args = self.parser.parse_known_args(\n args=[\"-s\", f\"FEEDS={feeds_json}\", \"spider.py\"]\n )\n self.command.process_options(args, opts)\n assert isinstance(self.command.settings[\"FEEDS\"], scrapy.settings.BaseSettings)\n assert dict(self.command.settings[\"FEEDS\"]) == json.loads(feeds_json)\n\n def test_help_formatter(self):\n formatter = ScrapyHelpFormatter(prog=\"scrapy\")\n part_strings = [\n \"usage: scrapy genspider [options] <name> <domain>\\n\\n\",\n \"\\n\",\n 
\"optional arguments:\\n\",\n \"\\n\",\n \"Global Options:\\n\",\n ]\n assert formatter._join_parts(part_strings) == (\n \"Usage\\n=====\\n scrapy genspider [options] <name> <domain>\\n\\n\\n\"\n \"Optional Arguments\\n==================\\n\\n\"\n \"Global Options\\n--------------\\n\"\n )\n\n\nclass TestProjectBase:\n \"\"\"A base class for tests that may need a Scrapy project.\"\"\"\n\n project_name = \"testproject\"\n\n @pytest.fixture(scope=\"session\")\n def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:\n \"\"\"Create a Scrapy project in a temporary directory and return its path.\n\n Used as a cache for ``proj_path``.\n \"\"\"\n tmp_path = tmp_path_factory.mktemp(\"proj\")\n call(\"startproject\", self.project_name, cwd=tmp_path)\n return tmp_path / self.project_name\n\n @pytest.fixture\n def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:\n \"\"\"Copy a pre-generated Scrapy project into a temporary directory and return its path.\"\"\"\n proj_path = tmp_path / self.project_name\n copytree(_proj_path_cached, proj_path)\n return proj_path\n\n\nclass TestCommandCrawlerProcess(TestProjectBase):\n \"\"\"Test that the command uses the expected kind of *CrawlerProcess\n and produces expected errors when needed.\"\"\"\n\n name = \"crawl\"\n NORMAL_MSG = \"Using CrawlerProcess\"\n ASYNC_MSG = \"Using AsyncCrawlerProcess\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"sp.py\").write_text(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'sp'\n\n custom_settings = {}\n\n async def start(self):\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n (proj_mod_path / \"spiders\" / \"aiosp.py\").write_text(\"\"\"\nimport asyncio\n\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'aiosp'\n\n custom_settings = {}\n\n async def start(self):\n await asyncio.sleep(0.01)\n 
self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n self._append_settings(proj_mod_path, \"LOG_LEVEL = 'DEBUG'\\n\")\n\n @staticmethod\n def _append_settings(proj_mod_path: Path, text: str) -> None:\n \"\"\"Add text to the end of the project settings.py.\"\"\"\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(text)\n\n @staticmethod\n def _replace_custom_settings(\n proj_mod_path: Path, spider_name: str, text: str\n ) -> None:\n \"\"\"Replace custom_settings in the given spider file with the given text.\"\"\"\n spider_path = proj_mod_path / \"spiders\" / f\"{spider_name}.py\"\n with spider_path.open(\"r+\", encoding=\"utf-8\") as f:\n content = f.read()\n content = content.replace(\n \"custom_settings = {}\", f\"custom_settings = {text}\"\n )\n f.seek(0)\n f.write(content)\n f.truncate()\n\n def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider works.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"It works!\" in err\n assert \"Spider closed (finished)\" in err\n\n def _assert_spider_asyncio_fail(\n self, msg: str, proj_path: Path, *args: str\n ) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider fails to use asyncio.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"no running event loop\" in err\n\n def test_project_settings(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project default settings (to the asyncio value).\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the asyncio value.\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in 
[\"sp\", \"aiosp\"]:\n self._assert_spider_works(\n self.ASYNC_MSG,\n proj_path,\n spider,\n \"-s\",\n f\"TWISTED_REACTOR={_asyncio_reactor_path}\",\n )\n\n def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor explicitly is set via the project settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_settings_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def 
test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value\n and via command line to the empty value. The command line value takes\n precedence so the spider settings don't matter.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value\n and via the spider settings to the asyncio value. CrawlerProcess is\n chosen based on the project settings, but the asyncio reactor is chosen\n based on the spider settings.\n\n CrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)\n\n def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value. 
AsyncCrawlerProcess\n is chosen based on the project settings, and the conflicting reactor\n setting in the spider settings causes an exception.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders produce a\n mismatched reactor exception.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n _, _, err = proc(self.name, spider, cwd=proj_path)\n assert self.ASYNC_MSG in err\n assert (\n \"The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)\"\n \" does not match the requested one\"\n \" (twisted.internet.selectreactor.SelectReactor)\"\n ) in err\n\n def test_project_asyncio_spider_settings_select_forced(\n self, proj_path: Path\n ) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value, CrawlerProcess is\n forced via the project settings. 
The reactor is chosen based on the\n spider settings.\n\n CrawlerProcess, the select reactor, only the normal spider works.\"\"\"\n self._append_settings(\n proj_path / self.project_name, \"FORCE_CRAWLER_PROCESS = True\\n\"\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, \"aiosp\")\n\n\nclass TestMiscCommands(TestProjectBase):\n def test_list(self, proj_path: Path) -> None:\n assert call(\"list\", cwd=proj_path) == 0\n\n def test_list_subdir(self, proj_path: Path) -> None:\n \"\"\"Test that commands work in a subdirectory of the project.\"\"\"\n subdir = proj_path / \"subdir\"\n subdir.mkdir(exist_ok=True)\n assert call(\"list\", cwd=subdir) == 0\n\n def test_command_not_found(self) -> None:\n na_msg = \"\"\"\nThe list command is not available from this location.\nThese commands are only available from within a project: check, crawl, edit, list, parse.\n\"\"\"\n not_found_msg = \"\"\"\nUnknown command: abc\n\"\"\"\n params = [\n (\"list\", False, na_msg),\n (\"abc\", False, not_found_msg),\n (\"abc\", True, not_found_msg),\n ]\n for cmdname, inproject, message in params:\n with mock.patch(\"sys.stdout\", new=StringIO()) as out:\n _print_unknown_command_msg(Settings(), cmdname, inproject)\n assert out.getvalue().strip() == message.strip()\n\n\nclass TestBenchCommand:\n def test_run(self) -> None:\n _, _, err = proc(\n \"bench\",\n \"-s\",\n \"LOGSTATS_INTERVAL=0.001\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.01\",\n )\n assert \"INFO: Crawled\" in err\n assert \"Unhandled Error\" not in err\n assert \"log_count/ERROR\" not in err\n\n\nclass TestViewCommand:\n def test_methods(self) -> None:\n command = view.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n 
prog=\"scrapy\",\n prefix_chars=\"-\",\n formatter_class=ScrapyHelpFormatter,\n conflict_handler=\"resolve\",\n )\n command.add_options(parser)\n assert command.short_desc() == \"Open URL in browser, as seen by Scrapy\"\n assert \"URL using the Scrapy downloader and show its\" in command.long_desc()\n\n\nclass TestHelpMessage(TestProjectBase):\n COMMANDS = [\n \"parse\",\n \"startproject\",\n \"view\",\n \"crawl\",\n \"edit\",\n \"list\",\n \"fetch\",\n \"settings\",\n \"shell\",\n \"runspider\",\n \"version\",\n \"genspider\",\n \"check\",\n \"bench\",\n ]\n\n def test_help_messages(self, proj_path: Path) -> None:\n for command in self.COMMANDS:\n _, out, _ = proc(command, \"-h\", cwd=proj_path)\n assert \"Usage\" in out\n\n\nclass TestPopCommandName:\n def test_valid_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"my_spider\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"my_spider\"]\n\n def test_no_command(self) -> None:\n argv = [\"scrapy\"]\n command = _pop_command_name(argv)\n assert command is None\n assert argv == [\"scrapy\"]\n\n def test_option_before_command(self) -> None:\n argv = [\"scrapy\", \"-h\", \"crawl\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n\n def test_option_after_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"-h\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n", "framework": "pytest", "test_command": "pytest tests/test_commands.py::TestMiscCommands::test_list -xvs"}, {"test_file": "tests/test_commands.py", "test_function": "TestMiscCommands.test_list_subdir", "test_content": "from __future__ import annotations\n\nimport argparse\nimport json\nfrom io import StringIO\nfrom shutil import copytree\nfrom typing import TYPE_CHECKING\nfrom unittest import mock\n\nimport pytest\n\nimport scrapy\nfrom scrapy.cmdline import _pop_command_name, 
_print_unknown_command_msg\nfrom scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view\nfrom scrapy.settings import Settings\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass EmptyCommand(ScrapyCommand):\n def short_desc(self) -> str:\n return \"\"\n\n def run(self, args: list[str], opts: argparse.Namespace) -> None:\n pass\n\n\nclass TestCommandSettings:\n def setup_method(self):\n self.command = EmptyCommand()\n self.command.settings = Settings()\n self.parser = argparse.ArgumentParser(\n formatter_class=ScrapyHelpFormatter, conflict_handler=\"resolve\"\n )\n self.command.add_options(self.parser)\n\n def test_settings_json_string(self):\n feeds_json = '{\"data.json\": {\"format\": \"json\"}, \"data.xml\": {\"format\": \"xml\"}}'\n opts, args = self.parser.parse_known_args(\n args=[\"-s\", f\"FEEDS={feeds_json}\", \"spider.py\"]\n )\n self.command.process_options(args, opts)\n assert isinstance(self.command.settings[\"FEEDS\"], scrapy.settings.BaseSettings)\n assert dict(self.command.settings[\"FEEDS\"]) == json.loads(feeds_json)\n\n def test_help_formatter(self):\n formatter = ScrapyHelpFormatter(prog=\"scrapy\")\n part_strings = [\n \"usage: scrapy genspider [options] <name> <domain>\\n\\n\",\n \"\\n\",\n \"optional arguments:\\n\",\n \"\\n\",\n \"Global Options:\\n\",\n ]\n assert formatter._join_parts(part_strings) == (\n \"Usage\\n=====\\n scrapy genspider [options] <name> <domain>\\n\\n\\n\"\n \"Optional Arguments\\n==================\\n\\n\"\n \"Global Options\\n--------------\\n\"\n )\n\n\nclass TestProjectBase:\n \"\"\"A base class for tests that may need a Scrapy project.\"\"\"\n\n project_name = \"testproject\"\n\n @pytest.fixture(scope=\"session\")\n def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:\n \"\"\"Create a Scrapy project in a temporary directory and return its path.\n\n Used as a cache for 
``proj_path``.\n \"\"\"\n tmp_path = tmp_path_factory.mktemp(\"proj\")\n call(\"startproject\", self.project_name, cwd=tmp_path)\n return tmp_path / self.project_name\n\n @pytest.fixture\n def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:\n \"\"\"Copy a pre-generated Scrapy project into a temporary directory and return its path.\"\"\"\n proj_path = tmp_path / self.project_name\n copytree(_proj_path_cached, proj_path)\n return proj_path\n\n\nclass TestCommandCrawlerProcess(TestProjectBase):\n \"\"\"Test that the command uses the expected kind of *CrawlerProcess\n and produces expected errors when needed.\"\"\"\n\n name = \"crawl\"\n NORMAL_MSG = \"Using CrawlerProcess\"\n ASYNC_MSG = \"Using AsyncCrawlerProcess\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"sp.py\").write_text(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'sp'\n\n custom_settings = {}\n\n async def start(self):\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n (proj_mod_path / \"spiders\" / \"aiosp.py\").write_text(\"\"\"\nimport asyncio\n\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'aiosp'\n\n custom_settings = {}\n\n async def start(self):\n await asyncio.sleep(0.01)\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n self._append_settings(proj_mod_path, \"LOG_LEVEL = 'DEBUG'\\n\")\n\n @staticmethod\n def _append_settings(proj_mod_path: Path, text: str) -> None:\n \"\"\"Add text to the end of the project settings.py.\"\"\"\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(text)\n\n @staticmethod\n def _replace_custom_settings(\n proj_mod_path: Path, spider_name: str, text: str\n ) -> None:\n \"\"\"Replace custom_settings in the given spider file with the given text.\"\"\"\n spider_path = proj_mod_path / \"spiders\" / f\"{spider_name}.py\"\n with 
spider_path.open(\"r+\", encoding=\"utf-8\") as f:\n content = f.read()\n content = content.replace(\n \"custom_settings = {}\", f\"custom_settings = {text}\"\n )\n f.seek(0)\n f.write(content)\n f.truncate()\n\n def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider works.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"It works!\" in err\n assert \"Spider closed (finished)\" in err\n\n def _assert_spider_asyncio_fail(\n self, msg: str, proj_path: Path, *args: str\n ) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider fails to use asyncio.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"no running event loop\" in err\n\n def test_project_settings(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project default settings (to the asyncio value).\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the asyncio value.\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(\n self.ASYNC_MSG,\n proj_path,\n spider,\n \"-s\",\n f\"TWISTED_REACTOR={_asyncio_reactor_path}\",\n )\n\n def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor explicitly is set via the project settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_empty(self, 
proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_settings_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value\n and via command line to the empty value. 
The command line value takes\n precedence so the spider settings don't matter.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value\n and via the spider settings to the asyncio value. CrawlerProcess is\n chosen based on the project settings, but the asyncio reactor is chosen\n based on the spider settings.\n\n CrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)\n\n def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value. 
AsyncCrawlerProcess\n is chosen based on the project settings, and the conflicting reactor\n setting in the spider settings causes an exception.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders produce a\n mismatched reactor exception.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n _, _, err = proc(self.name, spider, cwd=proj_path)\n assert self.ASYNC_MSG in err\n assert (\n \"The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)\"\n \" does not match the requested one\"\n \" (twisted.internet.selectreactor.SelectReactor)\"\n ) in err\n\n def test_project_asyncio_spider_settings_select_forced(\n self, proj_path: Path\n ) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value, CrawlerProcess is\n forced via the project settings. 
The reactor is chosen based on the\n spider settings.\n\n CrawlerProcess, the select reactor, only the normal spider works.\"\"\"\n self._append_settings(\n proj_path / self.project_name, \"FORCE_CRAWLER_PROCESS = True\\n\"\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, \"aiosp\")\n\n\nclass TestMiscCommands(TestProjectBase):\n def test_list(self, proj_path: Path) -> None:\n assert call(\"list\", cwd=proj_path) == 0\n\n def test_list_subdir(self, proj_path: Path) -> None:\n \"\"\"Test that commands work in a subdirectory of the project.\"\"\"\n subdir = proj_path / \"subdir\"\n subdir.mkdir(exist_ok=True)\n assert call(\"list\", cwd=subdir) == 0\n\n def test_command_not_found(self) -> None:\n na_msg = \"\"\"\nThe list command is not available from this location.\nThese commands are only available from within a project: check, crawl, edit, list, parse.\n\"\"\"\n not_found_msg = \"\"\"\nUnknown command: abc\n\"\"\"\n params = [\n (\"list\", False, na_msg),\n (\"abc\", False, not_found_msg),\n (\"abc\", True, not_found_msg),\n ]\n for cmdname, inproject, message in params:\n with mock.patch(\"sys.stdout\", new=StringIO()) as out:\n _print_unknown_command_msg(Settings(), cmdname, inproject)\n assert out.getvalue().strip() == message.strip()\n\n\nclass TestBenchCommand:\n def test_run(self) -> None:\n _, _, err = proc(\n \"bench\",\n \"-s\",\n \"LOGSTATS_INTERVAL=0.001\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.01\",\n )\n assert \"INFO: Crawled\" in err\n assert \"Unhandled Error\" not in err\n assert \"log_count/ERROR\" not in err\n\n\nclass TestViewCommand:\n def test_methods(self) -> None:\n command = view.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n 
prog=\"scrapy\",\n prefix_chars=\"-\",\n formatter_class=ScrapyHelpFormatter,\n conflict_handler=\"resolve\",\n )\n command.add_options(parser)\n assert command.short_desc() == \"Open URL in browser, as seen by Scrapy\"\n assert \"URL using the Scrapy downloader and show its\" in command.long_desc()\n\n\nclass TestHelpMessage(TestProjectBase):\n COMMANDS = [\n \"parse\",\n \"startproject\",\n \"view\",\n \"crawl\",\n \"edit\",\n \"list\",\n \"fetch\",\n \"settings\",\n \"shell\",\n \"runspider\",\n \"version\",\n \"genspider\",\n \"check\",\n \"bench\",\n ]\n\n def test_help_messages(self, proj_path: Path) -> None:\n for command in self.COMMANDS:\n _, out, _ = proc(command, \"-h\", cwd=proj_path)\n assert \"Usage\" in out\n\n\nclass TestPopCommandName:\n def test_valid_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"my_spider\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"my_spider\"]\n\n def test_no_command(self) -> None:\n argv = [\"scrapy\"]\n command = _pop_command_name(argv)\n assert command is None\n assert argv == [\"scrapy\"]\n\n def test_option_before_command(self) -> None:\n argv = [\"scrapy\", \"-h\", \"crawl\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n\n def test_option_after_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"-h\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n", "framework": "pytest", "test_command": "pytest tests/test_commands.py::TestMiscCommands::test_list_subdir -xvs"}, {"test_file": "tests/test_feedexport.py", "test_function": "TestS3FeedStorage.test_store", "test_content": "from __future__ import annotations\n\nimport bz2\nimport csv\nimport gzip\nimport json\nimport lzma\nimport marshal\nimport os\nimport pickle\nimport random\nimport shutil\nimport string\nimport sys\nimport tempfile\nimport warnings\nfrom abc import ABC, 
abstractmethod\nfrom collections import defaultdict\nfrom io import BytesIO\nfrom logging import getLogger\nfrom pathlib import Path\nfrom string import ascii_letters, digits\nfrom typing import IO, TYPE_CHECKING, Any\nfrom unittest import mock\nfrom urllib.parse import quote, urljoin\nfrom urllib.request import pathname2url\n\nimport lxml.etree\nimport pytest\nfrom packaging.version import Version\nfrom testfixtures import LogCapture\nfrom w3lib.url import file_uri_to_path, path_to_file_uri\nfrom zope.interface import implementer\nfrom zope.interface.verify import verifyObject\n\nimport scrapy\nfrom scrapy import Spider, signals\nfrom scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning\nfrom scrapy.exporters import CsvItemExporter, JsonItemExporter\nfrom scrapy.extensions.feedexport import (\n BlockingFeedStorage,\n FeedExporter,\n FeedSlot,\n FileFeedStorage,\n FTPFeedStorage,\n GCSFeedStorage,\n IFeedStorage,\n S3FeedStorage,\n StdoutFeedStorage,\n)\nfrom scrapy.settings import Settings\nfrom scrapy.utils.defer import maybe_deferred_to_future\nfrom scrapy.utils.python import to_unicode\nfrom scrapy.utils.test import get_crawler\nfrom tests.mockserver.ftp import MockFTPServer\nfrom tests.mockserver.http import MockServer\nfrom tests.spiders import ItemSpider\nfrom tests.utils.decorators import coroutine_test, inline_callbacks_test\n\nif TYPE_CHECKING:\n from collections.abc import Callable, Iterable\n from os import PathLike\n\n\ndef path_to_url(path):\n return urljoin(\"file:\", pathname2url(str(path)))\n\n\ndef printf_escape(string):\n return string.replace(\"%\", \"%%\")\n\n\ndef build_url(path: str | PathLike) -> str:\n path_str = str(path)\n if path_str[0] != \"/\":\n path_str = \"/\" + path_str\n return urljoin(\"file:\", path_str)\n\n\ndef mock_google_cloud_storage() -> tuple[Any, Any, Any]:\n \"\"\"Creates autospec mocks for google-cloud-storage Client, Bucket and Blob\n classes and set their proper return values.\n \"\"\"\n from 
google.cloud.storage import Blob, Bucket, Client # noqa: PLC0415\n\n client_mock = mock.create_autospec(Client)\n\n bucket_mock = mock.create_autospec(Bucket)\n client_mock.get_bucket.return_value = bucket_mock\n\n blob_mock = mock.create_autospec(Blob)\n bucket_mock.blob.return_value = blob_mock\n\n return (client_mock, bucket_mock, blob_mock)\n\n\nclass TestFileFeedStorage:\n def test_store_file_uri(self, tmp_path):\n path = tmp_path / \"file.txt\"\n uri = path_to_file_uri(str(path))\n self._assert_stores(FileFeedStorage(uri), path)\n\n def test_store_file_uri_makedirs(self, tmp_path):\n path = tmp_path / \"more\" / \"paths\" / \"file.txt\"\n uri = path_to_file_uri(str(path))\n self._assert_stores(FileFeedStorage(uri), path)\n\n def test_store_direct_path(self, tmp_path):\n path = tmp_path / \"file.txt\"\n self._assert_stores(FileFeedStorage(str(path)), path)\n\n def test_store_direct_path_relative(self, tmp_path):\n old_cwd = Path.cwd()\n try:\n os.chdir(tmp_path)\n path = Path(\"foo\", \"bar\")\n self._assert_stores(FileFeedStorage(str(path)), path)\n finally:\n os.chdir(old_cwd)\n\n def test_interface(self, tmp_path):\n path = tmp_path / \"file.txt\"\n st = FileFeedStorage(str(path))\n verifyObject(IFeedStorage, st)\n\n @staticmethod\n def _store(path: Path, feed_options: dict[str, Any] | None = None) -> None:\n storage = FileFeedStorage(str(path), feed_options=feed_options)\n spider = scrapy.Spider(\"default\")\n file = storage.open(spider)\n file.write(b\"content\")\n storage.store(file)\n\n def test_append(self, tmp_path):\n path = tmp_path / \"file.txt\"\n self._store(path)\n self._assert_stores(FileFeedStorage(str(path)), path, b\"contentcontent\")\n\n def test_overwrite(self, tmp_path):\n path = tmp_path / \"file.txt\"\n self._store(path, {\"overwrite\": True})\n self._assert_stores(\n FileFeedStorage(str(path), feed_options={\"overwrite\": True}), path\n )\n\n @staticmethod\n def _assert_stores(\n storage: FileFeedStorage, path: Path, expected_content: 
bytes = b\"content\"\n ) -> None:\n spider = scrapy.Spider(\"default\")\n file = storage.open(spider)\n file.write(b\"content\")\n storage.store(file)\n assert path.exists()\n try:\n assert path.read_bytes() == expected_content\n finally:\n path.unlink()\n\n def test_preserves_windows_path_without_file_scheme(self):\n path = r\"C:\\Users\\user\\Desktop\\test.txt\"\n storage = FileFeedStorage(path)\n assert storage.path == path\n\n\n@pytest.mark.requires_reactor # needs a reactor for BlockingFeedStorage\nclass TestFTPFeedStorage:\n def get_test_spider(self, settings=None):\n class TestSpider(scrapy.Spider):\n name = \"test_spider\"\n\n crawler = get_crawler(settings_dict=settings)\n return TestSpider.from_crawler(crawler)\n\n async def _store(self, uri, content, feed_options=None, settings=None):\n crawler = get_crawler(settings_dict=settings or {})\n storage = FTPFeedStorage.from_crawler(\n crawler,\n uri,\n feed_options=feed_options,\n )\n verifyObject(IFeedStorage, storage)\n spider = self.get_test_spider()\n file = storage.open(spider)\n file.write(content)\n await maybe_deferred_to_future(storage.store(file))\n\n def _assert_stored(self, path: Path, content):\n assert path.exists()\n try:\n assert path.read_bytes() == content\n finally:\n path.unlink()\n\n @coroutine_test\n async def test_append(self):\n with MockFTPServer() as ftp_server:\n filename = \"file\"\n url = ftp_server.url(filename)\n feed_options = {\"overwrite\": False}\n await self._store(url, b\"foo\", feed_options=feed_options)\n await self._store(url, b\"bar\", feed_options=feed_options)\n self._assert_stored(ftp_server.path / filename, b\"foobar\")\n\n @coroutine_test\n async def test_overwrite(self):\n with MockFTPServer() as ftp_server:\n filename = \"file\"\n url = ftp_server.url(filename)\n await self._store(url, b\"foo\")\n await self._store(url, b\"bar\")\n self._assert_stored(ftp_server.path / filename, b\"bar\")\n\n @coroutine_test\n async def test_append_active_mode(self):\n with 
MockFTPServer() as ftp_server:\n settings = {\"FEED_STORAGE_FTP_ACTIVE\": True}\n filename = \"file\"\n url = ftp_server.url(filename)\n feed_options = {\"overwrite\": False}\n await self._store(url, b\"foo\", feed_options=feed_options, settings=settings)\n await self._store(url, b\"bar\", feed_options=feed_options, settings=settings)\n self._assert_stored(ftp_server.path / filename, b\"foobar\")\n\n @coroutine_test\n async def test_overwrite_active_mode(self):\n with MockFTPServer() as ftp_server:\n settings = {\"FEED_STORAGE_FTP_ACTIVE\": True}\n filename = \"file\"\n url = ftp_server.url(filename)\n await self._store(url, b\"foo\", settings=settings)\n await self._store(url, b\"bar\", settings=settings)\n self._assert_stored(ftp_server.path / filename, b\"bar\")\n\n def test_uri_auth_quote(self):\n # RFC3986: 3.2.1. User Information\n pw_quoted = quote(string.punctuation, safe=\"\")\n st = FTPFeedStorage(f\"ftp://foo:{pw_quoted}@example.com/some_path\", {})\n assert st.password == string.punctuation\n\n\nclass MyBlockingFeedStorage(BlockingFeedStorage):\n def _store_in_thread(self, file: IO[bytes]) -> None:\n return\n\n\nclass TestBlockingFeedStorage:\n def get_test_spider(self, settings=None):\n class TestSpider(scrapy.Spider):\n name = \"test_spider\"\n\n crawler = get_crawler(settings_dict=settings)\n return TestSpider.from_crawler(crawler)\n\n def test_default_temp_dir(self):\n b = MyBlockingFeedStorage()\n\n storage_file = b.open(self.get_test_spider())\n storage_dir = Path(storage_file.name).parent\n assert str(storage_dir) == tempfile.gettempdir()\n\n def test_temp_file(self, tmp_path):\n b = MyBlockingFeedStorage()\n\n spider = self.get_test_spider({\"FEED_TEMPDIR\": str(tmp_path)})\n storage_file = b.open(spider)\n storage_dir = Path(storage_file.name).parent\n assert storage_dir == tmp_path\n\n def test_invalid_folder(self, tmp_path):\n b = MyBlockingFeedStorage()\n\n invalid_path = tmp_path / \"invalid_path\"\n spider = 
self.get_test_spider({\"FEED_TEMPDIR\": str(invalid_path)})\n\n with pytest.raises(OSError, match=\"Not a Directory:\"):\n b.open(spider=spider)\n\n\n@pytest.mark.requires_boto3\n@pytest.mark.requires_reactor # needs a reactor for BlockingFeedStorage\nclass TestS3FeedStorage:\n def test_parse_credentials(self):\n aws_credentials = {\n \"AWS_ACCESS_KEY_ID\": \"settings_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"settings_secret\",\n \"AWS_SESSION_TOKEN\": \"settings_token\",\n }\n crawler = get_crawler(settings_dict=aws_credentials)\n # Instantiate with crawler\n storage = S3FeedStorage.from_crawler(\n crawler,\n \"s3://mybucket/export.csv\",\n )\n assert storage.access_key == \"settings_key\"\n assert storage.secret_key == \"settings_secret\"\n assert storage.session_token == \"settings_token\"\n # Instantiate directly\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\",\n aws_credentials[\"AWS_ACCESS_KEY_ID\"],\n aws_credentials[\"AWS_SECRET_ACCESS_KEY\"],\n session_token=aws_credentials[\"AWS_SESSION_TOKEN\"],\n )\n assert storage.access_key == \"settings_key\"\n assert storage.secret_key == \"settings_secret\"\n assert storage.session_token == \"settings_token\"\n # URI priority > settings priority\n storage = S3FeedStorage(\n \"s3://uri_key:uri_secret@mybucket/export.csv\",\n aws_credentials[\"AWS_ACCESS_KEY_ID\"],\n aws_credentials[\"AWS_SECRET_ACCESS_KEY\"],\n )\n assert storage.access_key == \"uri_key\"\n assert storage.secret_key == \"uri_secret\"\n\n @coroutine_test\n async def test_store(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n }\n crawler = get_crawler(settings_dict=settings)\n bucket = \"mybucket\"\n key = \"export.csv\"\n storage = S3FeedStorage.from_crawler(crawler, f\"s3://{bucket}/{key}\")\n verifyObject(IFeedStorage, storage)\n\n file = mock.MagicMock()\n\n storage.s3_client = mock.MagicMock()\n await maybe_deferred_to_future(storage.store(file))\n assert 
storage.s3_client.upload_fileobj.call_args == mock.call(\n Bucket=bucket, Key=key, Fileobj=file\n )\n\n def test_init_without_acl(self):\n storage = S3FeedStorage(\"s3://mybucket/export.csv\", \"access_key\", \"secret_key\")\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl is None\n\n def test_init_with_acl(self):\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\", \"access_key\", \"secret_key\", \"custom-acl\"\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl == \"custom-acl\"\n\n def test_init_with_endpoint_url(self):\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\",\n \"access_key\",\n \"secret_key\",\n endpoint_url=\"https://example.com\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.endpoint_url == \"https://example.com\"\n\n def test_init_with_region_name(self):\n region_name = \"ap-east-1\"\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\",\n \"access_key\",\n \"secret_key\",\n region_name=region_name,\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.region_name == region_name\n assert storage.s3_client._client_config.region_name == region_name\n\n def test_from_crawler_without_acl(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(\n crawler,\n \"s3://mybucket/export.csv\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl is None\n\n def test_without_endpoint_url(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(\n 
crawler,\n \"s3://mybucket/export.csv\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.endpoint_url is None\n\n def test_without_region_name(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(\n crawler,\n \"s3://mybucket/export.csv\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.s3_client._client_config.region_name == \"us-east-1\"\n\n def test_from_crawler_with_acl(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n \"FEED_STORAGE_S3_ACL\": \"custom-acl\",\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(\n crawler,\n \"s3://mybucket/export.csv\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl == \"custom-acl\"\n\n def test_from_crawler_with_endpoint_url(self):\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n \"AWS_ENDPOINT_URL\": \"https://example.com\",\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(crawler, \"s3://mybucket/export.csv\")\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.endpoint_url == \"https://example.com\"\n\n def test_from_crawler_with_region_name(self):\n region_name = \"ap-east-1\"\n settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n \"AWS_REGION_NAME\": region_name,\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(crawler, \"s3://mybucket/export.csv\")\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n 
assert storage.region_name == region_name\n assert storage.s3_client._client_config.region_name == region_name\n\n @coroutine_test\n async def test_store_without_acl(self):\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\",\n \"access_key\",\n \"secret_key\",\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl is None\n\n storage.s3_client = mock.MagicMock()\n await maybe_deferred_to_future(storage.store(BytesIO(b\"test file\")))\n acl = (\n storage.s3_client.upload_fileobj.call_args[1]\n .get(\"ExtraArgs\", {})\n .get(\"ACL\")\n )\n assert acl is None\n\n @coroutine_test\n async def test_store_with_acl(self):\n storage = S3FeedStorage(\n \"s3://mybucket/export.csv\", \"access_key\", \"secret_key\", \"custom-acl\"\n )\n assert storage.access_key == \"access_key\"\n assert storage.secret_key == \"secret_key\"\n assert storage.acl == \"custom-acl\"\n\n storage.s3_client = mock.MagicMock()\n await maybe_deferred_to_future(storage.store(BytesIO(b\"test file\")))\n acl = storage.s3_client.upload_fileobj.call_args[1][\"ExtraArgs\"][\"ACL\"]\n assert acl == \"custom-acl\"\n\n def test_overwrite_default(self):\n with LogCapture() as log:\n S3FeedStorage(\n \"s3://mybucket/export.csv\", \"access_key\", \"secret_key\", \"custom-acl\"\n )\n assert \"S3 does not support appending to files\" not in str(log)\n\n def test_overwrite_false(self):\n with LogCapture() as log:\n S3FeedStorage(\n \"s3://mybucket/export.csv\",\n \"access_key\",\n \"secret_key\",\n \"custom-acl\",\n feed_options={\"overwrite\": False},\n )\n assert \"S3 does not support appending to files\" in str(log)\n\n\n@pytest.mark.requires_reactor # needs a reactor for BlockingFeedStorage\nclass TestGCSFeedStorage:\n def test_parse_settings(self):\n try:\n from google.cloud.storage import Client # noqa: F401,PLC0415\n except ImportError:\n pytest.skip(\"GCSFeedStorage requires google-cloud-storage\")\n\n settings = {\"GCS_PROJECT_ID\": 
\"123\", \"FEED_STORAGE_GCS_ACL\": \"publicRead\"}\n crawler = get_crawler(settings_dict=settings)\n storage = GCSFeedStorage.from_crawler(crawler, \"gs://mybucket/export.csv\")\n assert storage.project_id == \"123\"\n assert storage.acl == \"publicRead\"\n assert storage.bucket_name == \"mybucket\"\n assert storage.blob_name == \"export.csv\"\n\n def test_parse_empty_acl(self):\n try:\n from google.cloud.storage import Client # noqa: F401,PLC0415\n except ImportError:\n pytest.skip(\"GCSFeedStorage requires google-cloud-storage\")\n\n settings = {\"GCS_PROJECT_ID\": \"123\", \"FEED_STORAGE_GCS_ACL\": \"\"}\n crawler = get_crawler(settings_dict=settings)\n storage = GCSFeedStorage.from_crawler(crawler, \"gs://mybucket/export.csv\")\n assert storage.acl is None\n\n settings = {\"GCS_PROJECT_ID\": \"123\", \"FEED_STORAGE_GCS_ACL\": None}\n crawler = get_crawler(settings_dict=settings)\n storage = GCSFeedStorage.from_crawler(crawler, \"gs://mybucket/export.csv\")\n assert storage.acl is None\n\n @coroutine_test\n async def test_store(self):\n try:\n from google.cloud.storage import Client # noqa: F401,PLC0415\n except ImportError:\n pytest.skip(\"GCSFeedStorage requires google-cloud-storage\")\n\n uri = \"gs://mybucket/export.csv\"\n project_id = \"myproject-123\"\n acl = \"publicRead\"\n (client_mock, bucket_mock, blob_mock) = mock_google_cloud_storage()\n with mock.patch(\"google.cloud.storage.Client\") as m:\n m.return_value = client_mock\n\n f = mock.Mock()\n storage = GCSFeedStorage(uri, project_id, acl)\n await maybe_deferred_to_future(storage.store(f))\n\n f.seek.assert_called_once_with(0)\n m.assert_called_once_with(project=project_id)\n client_mock.get_bucket.assert_called_once_with(\"mybucket\")\n bucket_mock.blob.assert_called_once_with(\"export.csv\")\n blob_mock.upload_from_file.assert_called_once_with(f, predefined_acl=acl)\n\n def test_overwrite_default(self):\n with LogCapture() as log:\n GCSFeedStorage(\"gs://mybucket/export.csv\", \"myproject-123\", 
"custom-acl")
        assert "GCS does not support appending to files" not in str(log)

    def test_overwrite_false(self):
        # overwrite=False is unsupported for GCS, so a warning must be logged.
        with LogCapture() as log:
            GCSFeedStorage(
                "gs://mybucket/export.csv",
                "myproject-123",
                "custom-acl",
                feed_options={"overwrite": False},
            )
        assert "GCS does not support appending to files" in str(log)


class TestStdoutFeedStorage:
    """Tests for StdoutFeedStorage (the ``stdout:`` URI scheme)."""

    def test_store(self):
        # Capture what the storage writes via the _stdout hook.
        out = BytesIO()
        storage = StdoutFeedStorage("stdout:", _stdout=out)
        file = storage.open(scrapy.Spider("default"))
        file.write(b"content")
        storage.store(file)
        assert out.getvalue() == b"content"

    def test_overwrite_default(self):
        # No feed_options: no overwrite warning should be emitted.
        with LogCapture() as log:
            StdoutFeedStorage("stdout:")
        assert (
            "Standard output (stdout) storage does not support overwriting"
            not in str(log)
        )

    def test_overwrite_true(self):
        # overwrite=True is unsupported for stdout, so a warning must be logged.
        with LogCapture() as log:
            StdoutFeedStorage("stdout:", feed_options={"overwrite": True})
        assert "Standard output (stdout) storage does not support overwriting" in str(
            log
        )


class FromCrawlerMixin:
    """Mixin that records (on the class) that ``from_crawler`` was used."""

    init_with_crawler = False  # flipped to True by from_crawler()

    @classmethod
    def from_crawler(cls, crawler, *args, feed_options=None, **kwargs):
        # Note: feed_options is deliberately NOT forwarded here.
        cls.init_with_crawler = True
        return cls(*args, **kwargs)


class FromCrawlerCsvItemExporter(CsvItemExporter, FromCrawlerMixin):
    """CSV item exporter that tracks from_crawler instantiation via the mixin."""

    pass


class FromCrawlerFileFeedStorage(FileFeedStorage, FromCrawlerMixin):
    """File feed storage that tracks from_crawler instantiation.

    Unlike FromCrawlerMixin, this override forwards feed_options to the
    constructor.
    """

    @classmethod
    def from_crawler(cls, crawler, *args, feed_options=None, **kwargs):
        cls.init_with_crawler = True
        return cls(*args, feed_options=feed_options, **kwargs)


class DummyBlockingFeedStorage(BlockingFeedStorage):
    """BlockingFeedStorage that appends the stored bytes to a local file."""

    def __init__(self, uri, *args, feed_options=None):
        self.path = Path(file_uri_to_path(uri))

    def _store_in_thread(self, file):
        # Create the parent directory on first use.
        dirname = self.path.parent
        if dirname and not dirname.exists():
            dirname.mkdir(parents=True)
        # Append mode: repeated stores accumulate into the same file.
        with self.path.open("ab") as output_file:
            output_file.write(file.read())

        file.close()


class FailingBlockingFeedStorage(DummyBlockingFeedStorage):
    """Variant whose store step always fails, for failure-path tests."""

    def _store_in_thread(self, file):
        raise OSError("Cannot store")


@implementer(IFeedStorage)
class LogOnStoreFileStorage:
    """
    This storage logs inside `store` method.
    It can be used to make sure `store` method is invoked.
    """

    def __init__(self, uri, feed_options=None):
        self.path = file_uri_to_path(uri)
        self.logger = getLogger()

    def open(self, spider):
        return tempfile.NamedTemporaryFile(prefix="feed-")

    def store(self, file):
        self.logger.info("Storage.store is called")
        file.close()


class TestFeedExportBase(ABC):
    """Shared fixtures and helpers for the feed-export test classes."""

    mockserver: MockServer

    class MyItem(scrapy.Item):
        foo = scrapy.Field()
        egg = scrapy.Field()
        baz = scrapy.Field()

    class MyItem2(scrapy.Item):
        foo = scrapy.Field()
        hello = scrapy.Field()

    def _random_temp_filename(self, inter_dir="") -> Path:
        # 15 random alphanumeric characters under the per-test temp directory.
        chars = [random.choice(ascii_letters + digits) for _ in range(15)]
        filename = "".join(chars)
        return Path(self.temp_dir, inter_dir, filename)

    @classmethod
    def setup_class(cls):
        # One mock HTTP server shared by the whole class.
        cls.mockserver = MockServer()
        cls.mockserver.__enter__()

    @classmethod
    def teardown_class(cls):
        cls.mockserver.__exit__(None, None, None)

    def setup_method(self):
        # Fresh temp directory per test.
        self.temp_dir = tempfile.mkdtemp()

    def teardown_method(self):
        shutil.rmtree(self.temp_dir, ignore_errors=True)

    async def exported_data(
        self, items: Iterable[Any], settings: dict[str, Any]
    ) -> dict[str, Any]:
        """
        Return exported data which a spider yielding ``items`` would return.
        """

        class TestSpider(scrapy.Spider):
            name = "testspider"

            def parse(self, response):
                yield from items

        return await self.run_and_export(TestSpider, settings)

    async def exported_no_data(self, settings: dict[str, Any]) -> dict[str, Any]:
        """
        Return exported data which a spider yielding no ``items`` would return.
        """

        class TestSpider(scrapy.Spider):
            name = "testspider"

            def parse(self, response):
                pass

        return await self.run_and_export(TestSpider, settings)

    async def assertExported(
        self,
        items: Iterable[Any],
        header: Iterable[str],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        # Run all per-format assertion hooks; subclasses override the ones
        # they support (the defaults below are no-ops).
        await self.assertExportedCsv(items, header, rows, settings)
        await self.assertExportedJsonLines(items, rows, settings)
        await self.assertExportedXml(items, rows, settings)
        await self.assertExportedPickle(items, rows, settings)
        await self.assertExportedMarshal(items, rows, settings)
        await self.assertExportedMultiple(items, rows, settings)

    async def assertExportedCsv(  # noqa: B027
        self,
        items: Iterable[Any],
        header: Iterable[str],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    async def assertExportedJsonLines(  # noqa: B027
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    async def assertExportedXml(  # noqa: B027
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    async def assertExportedMultiple(  # noqa: B027
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    async def assertExportedPickle(  # noqa: B027
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    async def assertExportedMarshal(  # noqa: B027
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        pass

    @abstractmethod
    async def run_and_export(
        self, spider_cls: type[Spider], settings: dict[str, Any]
    ) -> dict[str, Any]:
        pass

    def _load_until_eof(self, data, load_func):
        # Repeatedly apply load_func to a temp file seeded with ``data``
        # until EOFError, collecting each loaded object.
        result = []
        with tempfile.TemporaryFile() as temp:
            temp.write(data)
            temp.seek(0)
            while True:
                try:
result.append(load_func(temp))\n except EOFError:\n break\n return result\n\n\nclass InstrumentedFeedSlot(FeedSlot):\n \"\"\"Instrumented FeedSlot subclass for keeping track of calls to\n start_exporting and finish_exporting.\"\"\"\n\n def start_exporting(self):\n self.update_listener(\"start\")\n super().start_exporting()\n\n def finish_exporting(self):\n self.update_listener(\"finish\")\n super().finish_exporting()\n\n @classmethod\n def subscribe__listener(cls, listener):\n cls.update_listener = listener.update\n\n\nclass IsExportingListener:\n \"\"\"When subscribed to InstrumentedFeedSlot, keeps track of when\n a call to start_exporting has been made without a closing call to\n finish_exporting and when a call to finish_exporting has been made\n before a call to start_exporting.\"\"\"\n\n def __init__(self):\n self.start_without_finish = False\n self.finish_without_start = False\n\n def update(self, method):\n if method == \"start\":\n self.start_without_finish = True\n elif method == \"finish\":\n if self.start_without_finish:\n self.start_without_finish = False\n else:\n self.finish_before_start = True\n\n\nclass ExceptionJsonItemExporter(JsonItemExporter):\n \"\"\"JsonItemExporter that throws an exception every time export_item is called.\"\"\"\n\n def export_item(self, _):\n raise RuntimeError(\"foo\")\n\n\nclass TestFeedExport(TestFeedExportBase):\n async def run_and_export(\n self, spider_cls: type[Spider], settings: dict[str, Any]\n ) -> dict[str, Any]:\n \"\"\"Run spider with specified settings; return exported data.\"\"\"\n\n FEEDS = settings.get(\"FEEDS\") or {}\n settings[\"FEEDS\"] = {\n printf_escape(path_to_url(file_path)): feed_options\n for file_path, feed_options in FEEDS.items()\n }\n\n content: dict[str, Any] = {}\n try:\n spider_cls.start_urls = [self.mockserver.url(\"/\")]\n crawler = get_crawler(spider_cls, settings)\n await crawler.crawl_async()\n\n for file_path, feed_options in FEEDS.items():\n content[feed_options[\"format\"]] = (\n 
                    Path(file_path).read_bytes() if Path(file_path).exists() else None
                )

        finally:
            # Clean up any files the crawl produced.
            for file_path in FEEDS:
                if not Path(file_path).exists():
                    continue

                Path(file_path).unlink()

        return content

    async def assertExportedCsv(
        self,
        items: Iterable[Any],
        header: Iterable[str],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "csv"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        reader = csv.DictReader(to_unicode(data["csv"]).splitlines())
        assert reader.fieldnames == list(header)
        assert rows == list(reader)

    async def assertExportedJsonLines(
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "jl"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        parsed = [json.loads(to_unicode(line)) for line in data["jl"].splitlines()]
        # Empty/falsy fields are omitted from JSON lines output.
        rows = [{k: v for k, v in row.items() if v} for row in rows]
        assert rows == parsed

    async def assertExportedXml(
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "xml"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        rows = [{k: v for k, v in row.items() if v} for row in rows]
        root = lxml.etree.fromstring(data["xml"])
        got_rows = [{e.tag: e.text for e in it} for it in root.findall("item")]
        assert rows == got_rows

    async def assertExportedMultiple(
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        # Export the same run to two feeds (xml + json) and check both.
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "xml"},
                    self._random_temp_filename(): {"format": "json"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        rows = [{k: v for k, v in row.items() if v} for row in rows]
        # XML
        root = lxml.etree.fromstring(data["xml"])
        xml_rows = [{e.tag: e.text for e in it} for it in root.findall("item")]
        assert rows == xml_rows
        # JSON
        json_rows = json.loads(to_unicode(data["json"]))
        assert rows == json_rows

    async def assertExportedPickle(
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "pickle"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        expected = [{k: v for k, v in row.items() if v} for row in rows]

        result = self._load_until_eof(data["pickle"], load_func=pickle.load)
        assert result == expected

    async def assertExportedMarshal(
        self,
        items: Iterable[Any],
        rows: Iterable[dict[str, Any]],
        settings: dict[str, Any] | None = None,
    ) -> None:
        settings = settings or {}
        settings.update(
            {
                "FEEDS": {
                    self._random_temp_filename(): {"format": "marshal"},
                },
            }
        )
        data = await self.exported_data(items, settings)
        expected = [{k: v for k, v in row.items() if v} for row in rows]

        result = self._load_until_eof(data["marshal"], load_func=marshal.load)
        assert result == expected

    @inline_callbacks_test
    def test_stats_file_success(self):
        settings = {
            "FEEDS": {
                printf_escape(path_to_url(str(self._random_temp_filename()))): {
                    "format": "json",
                }
            },
        }
        crawler = get_crawler(ItemSpider, settings)
        yield crawler.crawl(mockserver=self.mockserver)
        assert "feedexport/success_count/FileFeedStorage" in crawler.stats.get_stats()
        assert crawler.stats.get_value("feedexport/success_count/FileFeedStorage") == 1

    @inline_callbacks_test
    def test_stats_file_failed(self):
        settings = {
            "FEEDS": {
                printf_escape(path_to_url(str(self._random_temp_filename()))): {
                    "format": "json",
                }
            },
        }
        crawler = get_crawler(ItemSpider, settings)
        # Force the store step to fail so the failed counter is incremented.
        with mock.patch(
            "scrapy.extensions.feedexport.FileFeedStorage.store",
            side_effect=KeyError("foo"),
        ):
            yield crawler.crawl(mockserver=self.mockserver)
        assert "feedexport/failed_count/FileFeedStorage" in crawler.stats.get_stats()
        assert crawler.stats.get_value("feedexport/failed_count/FileFeedStorage") == 1

    @inline_callbacks_test
    def test_stats_multiple_file(self):
        settings = {
            "FEEDS": {
                printf_escape(path_to_url(str(self._random_temp_filename()))): {
                    "format": "json",
                },
                "stdout:": {
                    "format": "xml",
                },
            },
        }
        crawler = get_crawler(ItemSpider, settings)
        with mock.patch.object(S3FeedStorage, "store"):
            yield crawler.crawl(mockserver=self.mockserver)
        assert "feedexport/success_count/FileFeedStorage" in crawler.stats.get_stats()
        assert "feedexport/success_count/StdoutFeedStorage" in crawler.stats.get_stats()
        assert crawler.stats.get_value("feedexport/success_count/FileFeedStorage") == 1
        assert (
            crawler.stats.get_value("feedexport/success_count/StdoutFeedStorage") == 1
        )

    @coroutine_test
    async def test_export_items(self):
        # feed exporters use field names from Item
        items = [
            self.MyItem({"foo": "bar1", "egg": "spam1"}),
            self.MyItem({"foo": "bar2", "egg": "spam2", "baz": "quux2"}),
        ]
        rows = [
            {"egg": "spam1", "foo": "bar1", "baz": ""},
            {"egg": "spam2", "foo": "bar2", "baz": "quux2"},
        ]
        header = self.MyItem.fields.keys()
        await self.assertExported(items, header, rows)

    @coroutine_test
    async def test_export_no_items_not_store_empty(self):
        for fmt in ("json", "jsonlines", "xml", "csv"):
            settings = {
                "FEEDS": {
                    self._random_temp_filename(): {"format": fmt},
                },
                "FEED_STORE_EMPTY": False,
            }
            data = await self.exported_no_data(settings)
            assert data[fmt] is None

    @coroutine_test
    async def test_start_finish_exporting_items(self):
        items = [
            self.MyItem({"foo": "bar1", "egg": "spam1"}),
        ]
        settings = {
            "FEEDS": {
                self._random_temp_filename(): {"format": "json"},
            },
            "FEED_EXPORT_INDENT": None,
        }

        listener = IsExportingListener()
        InstrumentedFeedSlot.subscribe__listener(listener)

        with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot):
            await self.exported_data(items, settings)
        assert not listener.start_without_finish
        # NOTE(review): verify that IsExportingListener.update can ever set
        # finish_without_start; if not, this assertion is vacuous.
        assert not listener.finish_without_start

    @coroutine_test
    async def test_start_finish_exporting_no_items(self):
        items = []
        settings = {
            "FEEDS": {
                self._random_temp_filename(): {"format": "json"},
            },
            "FEED_EXPORT_INDENT": None,
        }

        listener = IsExportingListener()
        InstrumentedFeedSlot.subscribe__listener(listener)

        with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot):
            await self.exported_data(items, settings)
        assert not listener.start_without_finish
        assert not listener.finish_without_start

    @coroutine_test
    async def test_start_finish_exporting_items_exception(self):
        items = [
            self.MyItem({"foo": "bar1", "egg": "spam1"}),
        ]
        settings = {
            "FEEDS": {
                self._random_temp_filename(): {"format": "json"},
            },
            "FEED_EXPORTERS": {"json": ExceptionJsonItemExporter},
            "FEED_EXPORT_INDENT": None,
        }

        listener = IsExportingListener()
        InstrumentedFeedSlot.subscribe__listener(listener)

        with mock.patch("scrapy.extensions.feedexport.FeedSlot", InstrumentedFeedSlot):
            await self.exported_data(items, settings)
        assert not listener.start_without_finish
        assert not listener.finish_without_start

    @coroutine_test
    async def test_start_finish_exporting_no_items_exception(self):
        items = []
        settings = {
            "FEEDS": {
    @coroutine_test
    async def test_export_no_items_multiple_feeds(self):
        """With FEED_STORE_EMPTY disabled and no items scraped,
        ``storage.store`` must not be called for any of the feeds.
        """
        settings = {
            "FEEDS": {
                self._random_temp_filename(): {"format": "json"},
                self._random_temp_filename(): {"format": "xml"},
                self._random_temp_filename(): {"format": "csv"},
            },
            "FEED_STORAGES": {"file": LogOnStoreFileStorage},
            "FEED_STORE_EMPTY": False,
        }

        with LogCapture() as log:
            await self.exported_no_data(settings)

        # LogOnStoreFileStorage logs this message on every store() call;
        # zero occurrences means no feed was stored.
        assert str(log).count("Storage.store is called") == 0
= [\n {\"egg\": \"spam1\", \"foo\": \"bar1\", \"baz\": \"\"},\n {\"egg\": \"\", \"foo\": \"bar2\", \"baz\": \"\"},\n {\"egg\": \"spam3\", \"foo\": \"bar3\", \"baz\": \"quux3\"},\n {\"egg\": \"spam4\", \"foo\": \"\", \"baz\": \"\"},\n ]\n rows_jl = [dict(row) for row in items]\n await self.assertExportedCsv(items, header, rows_csv)\n await self.assertExportedJsonLines(items, rows_jl)\n\n @coroutine_test\n async def test_export_items_empty_field_list(self):\n # FEED_EXPORT_FIELDS==[] means the same as default None\n items = [{\"foo\": \"bar\"}]\n header = [\"foo\"]\n rows = [{\"foo\": \"bar\"}]\n settings = {\"FEED_EXPORT_FIELDS\": []}\n await self.assertExportedCsv(items, header, rows)\n await self.assertExportedJsonLines(items, rows, settings)\n\n @coroutine_test\n async def test_export_items_field_list(self):\n items = [{\"foo\": \"bar\"}]\n header = [\"foo\", \"baz\"]\n rows = [{\"foo\": \"bar\", \"baz\": \"\"}]\n settings = {\"FEED_EXPORT_FIELDS\": header}\n await self.assertExported(items, header, rows, settings=settings)\n\n @coroutine_test\n async def test_export_items_comma_separated_field_list(self):\n items = [{\"foo\": \"bar\"}]\n header = [\"foo\", \"baz\"]\n rows = [{\"foo\": \"bar\", \"baz\": \"\"}]\n settings = {\"FEED_EXPORT_FIELDS\": \",\".join(header)}\n await self.assertExported(items, header, rows, settings=settings)\n\n @coroutine_test\n async def test_export_items_json_field_list(self):\n items = [{\"foo\": \"bar\"}]\n header = [\"foo\", \"baz\"]\n rows = [{\"foo\": \"bar\", \"baz\": \"\"}]\n settings = {\"FEED_EXPORT_FIELDS\": json.dumps(header)}\n await self.assertExported(items, header, rows, settings=settings)\n\n @coroutine_test\n async def test_export_items_field_names(self):\n items = [{\"foo\": \"bar\"}]\n header = {\"foo\": \"Foo\"}\n rows = [{\"Foo\": \"bar\"}]\n settings = {\"FEED_EXPORT_FIELDS\": header}\n await self.assertExported(items, list(header.values()), rows, settings=settings)\n\n @coroutine_test\n async def 
test_export_items_dict_field_names(self):\n items = [{\"foo\": \"bar\"}]\n header = {\n \"baz\": \"Baz\",\n \"foo\": \"Foo\",\n }\n rows = [{\"Baz\": \"\", \"Foo\": \"bar\"}]\n settings = {\"FEED_EXPORT_FIELDS\": header}\n await self.assertExported(items, [\"Baz\", \"Foo\"], rows, settings=settings)\n\n @coroutine_test\n async def test_export_items_json_field_names(self):\n items = [{\"foo\": \"bar\"}]\n header = {\"foo\": \"Foo\"}\n rows = [{\"Foo\": \"bar\"}]\n settings = {\"FEED_EXPORT_FIELDS\": json.dumps(header)}\n await self.assertExported(items, list(header.values()), rows, settings=settings)\n\n @coroutine_test\n async def test_export_based_on_item_classes(self):\n items = [\n self.MyItem({\"foo\": \"bar1\", \"egg\": \"spam1\"}),\n self.MyItem2({\"hello\": \"world2\", \"foo\": \"bar2\"}),\n {\"hello\": \"world3\", \"egg\": \"spam3\"},\n ]\n\n formats = {\n \"csv\": b\"baz,egg,foo\\r\\n,spam1,bar1\\r\\n\",\n \"json\": b'[\\n{\"hello\": \"world2\", \"foo\": \"bar2\"}\\n]',\n \"jsonlines\": (\n b'{\"foo\": \"bar1\", \"egg\": \"spam1\"}\\n{\"hello\": \"world2\", \"foo\": \"bar2\"}\\n'\n ),\n \"xml\": (\n b'<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n<items>\\n<item>'\n b\"<foo>bar1</foo><egg>spam1</egg></item>\\n<item><hello>\"\n b\"world2</hello><foo>bar2</foo></item>\\n<item><hello>world3\"\n b\"</hello><egg>spam3</egg></item>\\n</items>\"\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\n \"format\": \"csv\",\n \"item_classes\": [self.MyItem],\n },\n self._random_temp_filename(): {\n \"format\": \"json\",\n \"item_classes\": [self.MyItem2],\n },\n self._random_temp_filename(): {\n \"format\": \"jsonlines\",\n \"item_classes\": [self.MyItem, self.MyItem2],\n },\n self._random_temp_filename(): {\n \"format\": \"xml\",\n },\n },\n }\n\n data = await self.exported_data(items, settings)\n for fmt, expected in formats.items():\n assert data[fmt] == expected\n\n @coroutine_test\n async def test_export_based_on_custom_filters(self):\n 
items = [\n self.MyItem({\"foo\": \"bar1\", \"egg\": \"spam1\"}),\n self.MyItem2({\"hello\": \"world2\", \"foo\": \"bar2\"}),\n {\"hello\": \"world3\", \"egg\": \"spam3\"},\n ]\n\n MyItem = self.MyItem\n\n class CustomFilter1:\n def __init__(self, feed_options):\n pass\n\n def accepts(self, item):\n return isinstance(item, MyItem)\n\n class CustomFilter2(scrapy.extensions.feedexport.ItemFilter):\n def accepts(self, item):\n return \"foo\" in item.fields\n\n class CustomFilter3(scrapy.extensions.feedexport.ItemFilter):\n def accepts(self, item):\n return (\n isinstance(item, tuple(self.item_classes)) and item[\"foo\"] == \"bar1\"\n )\n\n formats = {\n \"json\": b'[\\n{\"foo\": \"bar1\", \"egg\": \"spam1\"}\\n]',\n \"xml\": (\n b'<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n<items>\\n<item>'\n b\"<foo>bar1</foo><egg>spam1</egg></item>\\n<item><hello>\"\n b\"world2</hello><foo>bar2</foo></item>\\n</items>\"\n ),\n \"jsonlines\": b'{\"foo\": \"bar1\", \"egg\": \"spam1\"}\\n',\n }\n\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\n \"format\": \"json\",\n \"item_filter\": CustomFilter1,\n },\n self._random_temp_filename(): {\n \"format\": \"xml\",\n \"item_filter\": CustomFilter2,\n },\n self._random_temp_filename(): {\n \"format\": \"jsonlines\",\n \"item_classes\": [self.MyItem, self.MyItem2],\n \"item_filter\": CustomFilter3,\n },\n },\n }\n\n data = await self.exported_data(items, settings)\n for fmt, expected in formats.items():\n assert data[fmt] == expected\n\n @coroutine_test\n async def test_export_dicts(self):\n # When dicts are used, only keys from the first row are used as\n # a header for CSV, and all fields are used for JSON Lines.\n items = [\n {\"foo\": \"bar\", \"egg\": \"spam\"},\n {\"foo\": \"bar\", \"egg\": \"spam\", \"baz\": \"quux\"},\n ]\n rows_csv = [{\"egg\": \"spam\", \"foo\": \"bar\"}, {\"egg\": \"spam\", \"foo\": \"bar\"}]\n rows_jl = items\n await self.assertExportedCsv(items, [\"foo\", \"egg\"], rows_csv)\n await 
self.assertExportedJsonLines(items, rows_jl)\n\n @coroutine_test\n async def test_export_tuple(self):\n items = [\n {\"foo\": \"bar1\", \"egg\": \"spam1\"},\n {\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux\"},\n ]\n\n settings = {\"FEED_EXPORT_FIELDS\": (\"foo\", \"baz\")}\n rows = [{\"foo\": \"bar1\", \"baz\": \"\"}, {\"foo\": \"bar2\", \"baz\": \"quux\"}]\n await self.assertExported(items, [\"foo\", \"baz\"], rows, settings=settings)\n\n @coroutine_test\n async def test_export_feed_export_fields(self):\n # FEED_EXPORT_FIELDS option allows to order export fields\n # and to select a subset of fields to export, both for Items and dicts.\n\n for item_cls in [self.MyItem, dict]:\n items = [\n item_cls({\"foo\": \"bar1\", \"egg\": \"spam1\"}),\n item_cls({\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux2\"}),\n ]\n\n # export all columns\n settings = {\"FEED_EXPORT_FIELDS\": \"foo,baz,egg\"}\n rows = [\n {\"egg\": \"spam1\", \"foo\": \"bar1\", \"baz\": \"\"},\n {\"egg\": \"spam2\", \"foo\": \"bar2\", \"baz\": \"quux2\"},\n ]\n await self.assertExported(\n items, [\"foo\", \"baz\", \"egg\"], rows, settings=settings\n )\n\n # export a subset of columns\n settings = {\"FEED_EXPORT_FIELDS\": \"egg,baz\"}\n rows = [{\"egg\": \"spam1\", \"baz\": \"\"}, {\"egg\": \"spam2\", \"baz\": \"quux2\"}]\n await self.assertExported(items, [\"egg\", \"baz\"], rows, settings=settings)\n\n @coroutine_test\n async def test_export_encoding(self):\n items = [{\"foo\": \"Test\\xd6\"}]\n\n formats = {\n \"json\": b'[{\"foo\": \"Test\\\\u00d6\"}]',\n \"jsonlines\": b'{\"foo\": \"Test\\\\u00d6\"}\\n',\n \"xml\": (\n '<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n'\n \"<items><item><foo>Test\\xd6</foo></item></items>\"\n ).encode(),\n \"csv\": \"foo\\r\\nTest\\xd6\\r\\n\".encode(),\n }\n\n for fmt, expected in formats.items():\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": fmt},\n },\n \"FEED_EXPORT_INDENT\": None,\n }\n data = await 
self.exported_data(items, settings)\n assert data[fmt] == expected\n\n formats = {\n \"json\": b'[{\"foo\": \"Test\\xd6\"}]',\n \"jsonlines\": b'{\"foo\": \"Test\\xd6\"}\\n',\n \"xml\": (\n b'<?xml version=\"1.0\" encoding=\"latin-1\"?>\\n'\n b\"<items><item><foo>Test\\xd6</foo></item></items>\"\n ),\n \"csv\": b\"foo\\r\\nTest\\xd6\\r\\n\",\n }\n\n for fmt, expected in formats.items():\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": fmt},\n },\n \"FEED_EXPORT_INDENT\": None,\n \"FEED_EXPORT_ENCODING\": \"latin-1\",\n }\n data = await self.exported_data(items, settings)\n assert data[fmt] == expected\n\n @coroutine_test\n async def test_export_multiple_configs(self):\n items = [{\"foo\": \"FOO\", \"bar\": \"BAR\"}]\n\n formats = {\n \"json\": b'[\\n{\"bar\": \"BAR\"}\\n]',\n \"xml\": (\n b'<?xml version=\"1.0\" encoding=\"latin-1\"?>\\n'\n b\"<items>\\n <item>\\n <foo>FOO</foo>\\n </item>\\n</items>\"\n ),\n \"csv\": b\"bar,foo\\r\\nBAR,FOO\\r\\n\",\n }\n\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\n \"format\": \"json\",\n \"indent\": 0,\n \"fields\": [\"bar\"],\n \"encoding\": \"utf-8\",\n },\n self._random_temp_filename(): {\n \"format\": \"xml\",\n \"indent\": 2,\n \"fields\": [\"foo\"],\n \"encoding\": \"latin-1\",\n },\n self._random_temp_filename(): {\n \"format\": \"csv\",\n \"indent\": None,\n \"fields\": [\"bar\", \"foo\"],\n \"encoding\": \"utf-8\",\n },\n },\n }\n\n data = await self.exported_data(items, settings)\n for fmt, expected in formats.items():\n assert data[fmt] == expected\n\n @coroutine_test\n async def test_export_indentation(self):\n items = [\n {\"foo\": [\"bar\"]},\n {\"key\": \"value\"},\n ]\n\n test_cases = [\n # JSON\n {\n \"format\": \"json\",\n \"indent\": None,\n \"expected\": b'[{\"foo\": [\"bar\"]},{\"key\": \"value\"}]',\n },\n {\n \"format\": \"json\",\n \"indent\": -1,\n \"expected\": b\"\"\"[\n{\"foo\": [\"bar\"]},\n{\"key\": \"value\"}\n]\"\"\",\n },\n {\n \"format\": 
\"json\",\n \"indent\": 0,\n \"expected\": b\"\"\"[\n{\"foo\": [\"bar\"]},\n{\"key\": \"value\"}\n]\"\"\",\n },\n {\n \"format\": \"json\",\n \"indent\": 2,\n \"expected\": b\"\"\"[\n{\n \"foo\": [\n \"bar\"\n ]\n},\n{\n \"key\": \"value\"\n}\n]\"\"\",\n },\n {\n \"format\": \"json\",\n \"indent\": 4,\n \"expected\": b\"\"\"[\n{\n \"foo\": [\n \"bar\"\n ]\n},\n{\n \"key\": \"value\"\n}\n]\"\"\",\n },\n {\n \"format\": \"json\",\n \"indent\": 5,\n \"expected\": b\"\"\"[\n{\n \"foo\": [\n \"bar\"\n ]\n},\n{\n \"key\": \"value\"\n}\n]\"\"\",\n },\n # XML\n {\n \"format\": \"xml\",\n \"indent\": None,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items><item><foo><value>bar</value></foo></item><item><key>value</key></item></items>\"\"\",\n },\n {\n \"format\": \"xml\",\n \"indent\": -1,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items>\n<item><foo><value>bar</value></foo></item>\n<item><key>value</key></item>\n</items>\"\"\",\n },\n {\n \"format\": \"xml\",\n \"indent\": 0,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items>\n<item><foo><value>bar</value></foo></item>\n<item><key>value</key></item>\n</items>\"\"\",\n },\n {\n \"format\": \"xml\",\n \"indent\": 2,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items>\n <item>\n <foo>\n <value>bar</value>\n </foo>\n </item>\n <item>\n <key>value</key>\n </item>\n</items>\"\"\",\n },\n {\n \"format\": \"xml\",\n \"indent\": 4,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items>\n <item>\n <foo>\n <value>bar</value>\n </foo>\n </item>\n <item>\n <key>value</key>\n </item>\n</items>\"\"\",\n },\n {\n \"format\": \"xml\",\n \"indent\": 5,\n \"expected\": b\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<items>\n <item>\n <foo>\n <value>bar</value>\n </foo>\n </item>\n <item>\n <key>value</key>\n </item>\n</items>\"\"\",\n },\n ]\n\n for row in test_cases:\n settings = {\n \"FEEDS\": {\n 
self._random_temp_filename(): {\n \"format\": row[\"format\"],\n \"indent\": row[\"indent\"],\n },\n },\n }\n data = await self.exported_data(items, settings)\n assert data[row[\"format\"]] == row[\"expected\"]\n\n @coroutine_test\n async def test_init_exporters_storages_with_crawler(self):\n settings = {\n \"FEED_EXPORTERS\": {\"csv\": FromCrawlerCsvItemExporter},\n \"FEED_STORAGES\": {\"file\": FromCrawlerFileFeedStorage},\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": \"csv\"},\n },\n }\n await self.exported_data(items=[], settings=settings)\n assert FromCrawlerCsvItemExporter.init_with_crawler\n assert FromCrawlerFileFeedStorage.init_with_crawler\n\n @coroutine_test\n async def test_str_uri(self):\n settings = {\n \"FEED_STORE_EMPTY\": True,\n \"FEEDS\": {str(self._random_temp_filename()): {\"format\": \"csv\"}},\n }\n data = await self.exported_no_data(settings)\n assert data[\"csv\"] == b\"\"\n\n @pytest.mark.requires_reactor # needs a reactor for BlockingFeedStorage\n @coroutine_test\n async def test_multiple_feeds_success_logs_blocking_feed_storage(self):\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": \"json\"},\n self._random_temp_filename(): {\"format\": \"xml\"},\n self._random_temp_filename(): {\"format\": \"csv\"},\n },\n \"FEED_STORAGES\": {\"file\": DummyBlockingFeedStorage},\n }\n items = [\n {\"foo\": \"bar1\", \"baz\": \"\"},\n {\"foo\": \"bar2\", \"baz\": \"quux\"},\n ]\n with LogCapture() as log:\n await self.exported_data(items, settings)\n\n for fmt in [\"json\", \"xml\", \"csv\"]:\n assert f\"Stored {fmt} feed (2 items)\" in str(log)\n\n @coroutine_test\n async def test_multiple_feeds_failing_logs_blocking_feed_storage(self):\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": \"json\"},\n self._random_temp_filename(): {\"format\": \"xml\"},\n self._random_temp_filename(): {\"format\": \"csv\"},\n },\n \"FEED_STORAGES\": {\"file\": FailingBlockingFeedStorage},\n }\n items = [\n 
{\"foo\": \"bar1\", \"baz\": \"\"},\n {\"foo\": \"bar2\", \"baz\": \"quux\"},\n ]\n with LogCapture() as log:\n await self.exported_data(items, settings)\n\n for fmt in [\"json\", \"xml\", \"csv\"]:\n assert f\"Error storing {fmt} feed (2 items)\" in str(log)\n\n @coroutine_test\n async def test_extend_kwargs(self):\n items = [{\"foo\": \"FOO\", \"bar\": \"BAR\"}]\n\n expected_with_title_csv = b\"foo,bar\\r\\nFOO,BAR\\r\\n\"\n expected_without_title_csv = b\"FOO,BAR\\r\\n\"\n test_cases = [\n # with title\n {\n \"options\": {\n \"format\": \"csv\",\n \"item_export_kwargs\": {\"include_headers_line\": True},\n },\n \"expected\": expected_with_title_csv,\n },\n # without title\n {\n \"options\": {\n \"format\": \"csv\",\n \"item_export_kwargs\": {\"include_headers_line\": False},\n },\n \"expected\": expected_without_title_csv,\n },\n ]\n\n for row in test_cases:\n feed_options = row[\"options\"]\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): feed_options,\n },\n \"FEED_EXPORT_INDENT\": None,\n }\n\n data = await self.exported_data(items, settings)\n assert data[feed_options[\"format\"]] == row[\"expected\"]\n\n @coroutine_test\n async def test_storage_file_no_postprocessing(self):\n @implementer(IFeedStorage)\n class Storage:\n def __init__(self, uri, *, feed_options=None):\n pass\n\n def open(self, spider):\n Storage.open_file = tempfile.NamedTemporaryFile(prefix=\"feed-\")\n return Storage.open_file\n\n def store(self, file):\n Storage.store_file = file\n file.close()\n\n settings = {\n \"FEEDS\": {self._random_temp_filename(): {\"format\": \"jsonlines\"}},\n \"FEED_STORAGES\": {\"file\": Storage},\n }\n await self.exported_no_data(settings)\n assert Storage.open_file is Storage.store_file\n\n @coroutine_test\n async def test_storage_file_postprocessing(self):\n @implementer(IFeedStorage)\n class Storage:\n def __init__(self, uri, *, feed_options=None):\n pass\n\n def open(self, spider):\n Storage.open_file = 
tempfile.NamedTemporaryFile(prefix=\"feed-\")\n return Storage.open_file\n\n def store(self, file):\n Storage.store_file = file\n Storage.file_was_closed = file.closed\n file.close()\n\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\n \"format\": \"jsonlines\",\n \"postprocessing\": [\n \"scrapy.extensions.postprocessing.GzipPlugin\",\n ],\n },\n },\n \"FEED_STORAGES\": {\"file\": Storage},\n }\n await self.exported_no_data(settings)\n assert Storage.open_file is Storage.store_file\n assert not Storage.file_was_closed\n\n\nclass TestFeedPostProcessedExports(TestFeedExportBase):\n items = [{\"foo\": \"bar\"}]\n expected = b\"foo\\r\\nbar\\r\\n\"\n\n class MyPlugin1:\n def __init__(self, file, feed_options):\n self.file = file\n self.feed_options = feed_options\n self.char = self.feed_options.get(\"plugin1_char\", b\"\")\n\n def write(self, data):\n written_count = self.file.write(data)\n written_count += self.file.write(self.char)\n return written_count\n\n def close(self):\n self.file.close()\n\n def _named_tempfile(self, name) -> str:\n return str(Path(self.temp_dir, name))\n\n async def run_and_export(\n self, spider_cls: type[Spider], settings: dict[str, Any]\n ) -> dict[str, bytes | None]:\n \"\"\"Run spider with specified settings; return exported data with filename.\"\"\"\n\n FEEDS = settings.get(\"FEEDS\") or {}\n settings[\"FEEDS\"] = {\n printf_escape(path_to_url(file_path)): feed_options\n for file_path, feed_options in FEEDS.items()\n }\n\n content: dict[str, bytes | None] = {}\n try:\n spider_cls.start_urls = [self.mockserver.url(\"/\")]\n crawler = get_crawler(spider_cls, settings)\n await crawler.crawl_async()\n\n for file_path in FEEDS:\n content[str(file_path)] = (\n Path(file_path).read_bytes() if Path(file_path).exists() else None\n )\n\n finally:\n for file_path in FEEDS:\n if not Path(file_path).exists():\n continue\n\n Path(file_path).unlink()\n\n return content\n\n def get_gzip_compressed(self, data, compresslevel=9, mtime=0, 
filename=\"\"):\n data_stream = BytesIO()\n gzipf = gzip.GzipFile(\n fileobj=data_stream,\n filename=filename,\n mtime=mtime,\n compresslevel=compresslevel,\n mode=\"wb\",\n )\n gzipf.write(data)\n gzipf.close()\n data_stream.seek(0)\n return data_stream.read()\n\n @coroutine_test\n async def test_gzip_plugin(self):\n filename = self._named_tempfile(\"gzip_file\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n try:\n gzip.decompress(data[filename])\n except OSError:\n pytest.fail(\"Received invalid gzip data.\")\n\n @coroutine_test\n async def test_gzip_plugin_compresslevel(self):\n filename_to_compressed = {\n self._named_tempfile(\"compresslevel_0\"): self.get_gzip_compressed(\n self.expected, compresslevel=0\n ),\n self._named_tempfile(\"compresslevel_9\"): self.get_gzip_compressed(\n self.expected, compresslevel=9\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"compresslevel_0\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_compresslevel\": 0,\n \"gzip_mtime\": 0,\n \"gzip_filename\": \"\",\n },\n self._named_tempfile(\"compresslevel_9\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_compresslevel\": 9,\n \"gzip_mtime\": 0,\n \"gzip_filename\": \"\",\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = gzip.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_gzip_plugin_mtime(self):\n filename_to_compressed = {\n self._named_tempfile(\"mtime_123\"): self.get_gzip_compressed(\n self.expected, mtime=123\n ),\n self._named_tempfile(\"mtime_123456789\"): self.get_gzip_compressed(\n 
self.expected, mtime=123456789\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"mtime_123\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_mtime\": 123,\n \"gzip_filename\": \"\",\n },\n self._named_tempfile(\"mtime_123456789\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_mtime\": 123456789,\n \"gzip_filename\": \"\",\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = gzip.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_gzip_plugin_filename(self):\n filename_to_compressed = {\n self._named_tempfile(\"filename_FILE1\"): self.get_gzip_compressed(\n self.expected, filename=\"FILE1\"\n ),\n self._named_tempfile(\"filename_FILE2\"): self.get_gzip_compressed(\n self.expected, filename=\"FILE2\"\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"filename_FILE1\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_mtime\": 0,\n \"gzip_filename\": \"FILE1\",\n },\n self._named_tempfile(\"filename_FILE2\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.GzipPlugin\"],\n \"gzip_mtime\": 0,\n \"gzip_filename\": \"FILE2\",\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = gzip.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_lzma_plugin(self):\n filename = self._named_tempfile(\"lzma_file\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n },\n },\n }\n\n data = await 
self.exported_data(self.items, settings)\n try:\n lzma.decompress(data[filename])\n except lzma.LZMAError:\n pytest.fail(\"Received invalid lzma data.\")\n\n @coroutine_test\n async def test_lzma_plugin_format(self):\n filename_to_compressed = {\n self._named_tempfile(\"format_FORMAT_XZ\"): lzma.compress(\n self.expected, format=lzma.FORMAT_XZ\n ),\n self._named_tempfile(\"format_FORMAT_ALONE\"): lzma.compress(\n self.expected, format=lzma.FORMAT_ALONE\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"format_FORMAT_XZ\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_format\": lzma.FORMAT_XZ,\n },\n self._named_tempfile(\"format_FORMAT_ALONE\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_format\": lzma.FORMAT_ALONE,\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = lzma.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_lzma_plugin_check(self):\n filename_to_compressed = {\n self._named_tempfile(\"check_CHECK_NONE\"): lzma.compress(\n self.expected, check=lzma.CHECK_NONE\n ),\n self._named_tempfile(\"check_CHECK_CRC256\"): lzma.compress(\n self.expected, check=lzma.CHECK_SHA256\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"check_CHECK_NONE\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_check\": lzma.CHECK_NONE,\n },\n self._named_tempfile(\"check_CHECK_CRC256\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_check\": lzma.CHECK_SHA256,\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = 
lzma.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_lzma_plugin_preset(self):\n filename_to_compressed = {\n self._named_tempfile(\"preset_PRESET_0\"): lzma.compress(\n self.expected, preset=0\n ),\n self._named_tempfile(\"preset_PRESET_9\"): lzma.compress(\n self.expected, preset=9\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"preset_PRESET_0\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_preset\": 0,\n },\n self._named_tempfile(\"preset_PRESET_9\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_preset\": 9,\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = lzma.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_lzma_plugin_filters(self):\n if \"PyPy\" in sys.version:\n # https://foss.heptapod.net/pypy/pypy/-/issues/3527\n pytest.skip(\"lzma filters doesn't work in PyPy\")\n\n filters = [{\"id\": lzma.FILTER_LZMA2}]\n compressed = lzma.compress(self.expected, filters=filters)\n filename = self._named_tempfile(\"filters\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.LZMAPlugin\"],\n \"lzma_filters\": filters,\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n assert compressed == data[filename]\n result = lzma.decompress(data[filename])\n assert result == self.expected\n\n @coroutine_test\n async def test_bz2_plugin(self):\n filename = self._named_tempfile(\"bz2_file\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.Bz2Plugin\"],\n },\n },\n }\n\n data = await 
self.exported_data(self.items, settings)\n try:\n bz2.decompress(data[filename])\n except OSError:\n pytest.fail(\"Received invalid bz2 data.\")\n\n @coroutine_test\n async def test_bz2_plugin_compresslevel(self):\n filename_to_compressed = {\n self._named_tempfile(\"compresslevel_1\"): bz2.compress(\n self.expected, compresslevel=1\n ),\n self._named_tempfile(\"compresslevel_9\"): bz2.compress(\n self.expected, compresslevel=9\n ),\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"compresslevel_1\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.Bz2Plugin\"],\n \"bz2_compresslevel\": 1,\n },\n self._named_tempfile(\"compresslevel_9\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\"scrapy.extensions.postprocessing.Bz2Plugin\"],\n \"bz2_compresslevel\": 9,\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, compressed in filename_to_compressed.items():\n result = bz2.decompress(data[filename])\n assert compressed == data[filename]\n assert result == self.expected\n\n @coroutine_test\n async def test_custom_plugin(self):\n filename = self._named_tempfile(\"csv_file\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [self.MyPlugin1],\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n assert data[filename] == self.expected\n\n @coroutine_test\n async def test_custom_plugin_with_parameter(self):\n expected = b\"foo\\r\\n\\nbar\\r\\n\\n\"\n filename = self._named_tempfile(\"newline\")\n\n settings = {\n \"FEEDS\": {\n filename: {\n \"format\": \"csv\",\n \"postprocessing\": [self.MyPlugin1],\n \"plugin1_char\": b\"\\n\",\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n assert data[filename] == expected\n\n @coroutine_test\n async def test_custom_plugin_with_compression(self):\n expected = b\"foo\\r\\n\\nbar\\r\\n\\n\"\n\n filename_to_decompressor = {\n self._named_tempfile(\"bz2\"): 
bz2.decompress,\n self._named_tempfile(\"lzma\"): lzma.decompress,\n self._named_tempfile(\"gzip\"): gzip.decompress,\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"bz2\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\n self.MyPlugin1,\n \"scrapy.extensions.postprocessing.Bz2Plugin\",\n ],\n \"plugin1_char\": b\"\\n\",\n },\n self._named_tempfile(\"lzma\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\n self.MyPlugin1,\n \"scrapy.extensions.postprocessing.LZMAPlugin\",\n ],\n \"plugin1_char\": b\"\\n\",\n },\n self._named_tempfile(\"gzip\"): {\n \"format\": \"csv\",\n \"postprocessing\": [\n self.MyPlugin1,\n \"scrapy.extensions.postprocessing.GzipPlugin\",\n ],\n \"plugin1_char\": b\"\\n\",\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, decompressor in filename_to_decompressor.items():\n result = decompressor(data[filename])\n assert result == expected\n\n @coroutine_test\n async def test_exports_compatibility_with_postproc(self):\n filename_to_expected = {\n self._named_tempfile(\"csv\"): b\"foo\\r\\nbar\\r\\n\",\n self._named_tempfile(\"json\"): b'[\\n{\"foo\": \"bar\"}\\n]',\n self._named_tempfile(\"jsonlines\"): b'{\"foo\": \"bar\"}\\n',\n self._named_tempfile(\"xml\"): b'<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n'\n b\"<items>\\n<item><foo>bar</foo></item>\\n</items>\",\n }\n\n settings = {\n \"FEEDS\": {\n self._named_tempfile(\"csv\"): {\n \"format\": \"csv\",\n \"postprocessing\": [self.MyPlugin1],\n # empty plugin to activate postprocessing.PostProcessingManager\n },\n self._named_tempfile(\"json\"): {\n \"format\": \"json\",\n \"postprocessing\": [self.MyPlugin1],\n },\n self._named_tempfile(\"jsonlines\"): {\n \"format\": \"jsonlines\",\n \"postprocessing\": [self.MyPlugin1],\n },\n self._named_tempfile(\"xml\"): {\n \"format\": \"xml\",\n \"postprocessing\": [self.MyPlugin1],\n },\n self._named_tempfile(\"marshal\"): {\n \"format\": \"marshal\",\n \"postprocessing\": 
[self.MyPlugin1],\n },\n self._named_tempfile(\"pickle\"): {\n \"format\": \"pickle\",\n \"postprocessing\": [self.MyPlugin1],\n },\n },\n }\n\n data = await self.exported_data(self.items, settings)\n\n for filename, result in data.items():\n if \"pickle\" in filename:\n expected, result = self.items[0], pickle.loads(result)\n elif \"marshal\" in filename:\n expected, result = self.items[0], marshal.loads(result)\n else:\n expected = filename_to_expected[filename]\n assert result == expected\n\n\nclass TestBatchDeliveries(TestFeedExportBase):\n _file_mark = \"_%(batch_time)s_#%(batch_id)02d_\"\n\n async def run_and_export(\n self, spider_cls: type[Spider], settings: dict[str, Any]\n ) -> dict[str, list[bytes]]:\n \"\"\"Run spider with specified settings; return exported data.\"\"\"\n\n FEEDS = settings.get(\"FEEDS\") or {}\n settings[\"FEEDS\"] = {\n build_url(file_path): feed for file_path, feed in FEEDS.items()\n }\n content: defaultdict[str, list[bytes]] = defaultdict(list)\n spider_cls.start_urls = [self.mockserver.url(\"/\")]\n crawler = get_crawler(spider_cls, settings)\n await crawler.crawl_async()\n\n for path, feed in FEEDS.items():\n dir_name = Path(path).parent\n if not dir_name.exists():\n content[feed[\"format\"]] = []\n continue\n for file in sorted(dir_name.iterdir()):\n content[feed[\"format\"]].append(file.read_bytes())\n return content\n\n async def assertExportedJsonLines(self, items, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"jl\" / self._file_mark: {\n \"format\": \"jl\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n rows = [{k: v for k, v in row.items() if v} for row in rows]\n data = await self.exported_data(items, settings)\n for batch in data[\"jl\"]:\n got_batch = [\n json.loads(to_unicode(batch_item)) for batch_item in batch.splitlines()\n ]\n expected_batch, rows = rows[:batch_size], rows[batch_size:]\n assert 
got_batch == expected_batch\n\n async def assertExportedCsv(self, items, header, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"csv\" / self._file_mark: {\n \"format\": \"csv\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n data = await self.exported_data(items, settings)\n for batch in data[\"csv\"]:\n got_batch = csv.DictReader(to_unicode(batch).splitlines())\n assert list(header) == got_batch.fieldnames\n expected_batch, rows = rows[:batch_size], rows[batch_size:]\n assert list(got_batch) == expected_batch\n\n async def assertExportedXml(self, items, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"xml\" / self._file_mark: {\n \"format\": \"xml\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n rows = [{k: v for k, v in row.items() if v} for row in rows]\n data = await self.exported_data(items, settings)\n for batch in data[\"xml\"]:\n root = lxml.etree.fromstring(batch)\n got_batch = [{e.tag: e.text for e in it} for it in root.findall(\"item\")]\n expected_batch, rows = rows[:batch_size], rows[batch_size:]\n assert got_batch == expected_batch\n\n async def assertExportedMultiple(self, items, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"xml\" / self._file_mark: {\n \"format\": \"xml\"\n },\n self._random_temp_filename() / \"json\" / self._file_mark: {\n \"format\": \"json\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n rows = [{k: v for k, v in row.items() if v} for row in rows]\n data = await self.exported_data(items, settings)\n # XML\n xml_rows = rows.copy()\n for batch in data[\"xml\"]:\n root = lxml.etree.fromstring(batch)\n got_batch = [{e.tag: e.text for e in it} for it in 
root.findall(\"item\")]\n expected_batch, xml_rows = xml_rows[:batch_size], xml_rows[batch_size:]\n assert got_batch == expected_batch\n # JSON\n json_rows = rows.copy()\n for batch in data[\"json\"]:\n got_batch = json.loads(batch.decode(\"utf-8\"))\n expected_batch, json_rows = json_rows[:batch_size], json_rows[batch_size:]\n assert got_batch == expected_batch\n\n async def assertExportedPickle(self, items, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"pickle\" / self._file_mark: {\n \"format\": \"pickle\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n rows = [{k: v for k, v in row.items() if v} for row in rows]\n data = await self.exported_data(items, settings)\n\n for batch in data[\"pickle\"]:\n got_batch = self._load_until_eof(batch, load_func=pickle.load)\n expected_batch, rows = rows[:batch_size], rows[batch_size:]\n assert got_batch == expected_batch\n\n async def assertExportedMarshal(self, items, rows, settings=None):\n settings = settings or {}\n settings.update(\n {\n \"FEEDS\": {\n self._random_temp_filename() / \"marshal\" / self._file_mark: {\n \"format\": \"marshal\"\n },\n },\n }\n )\n batch_size = Settings(settings).getint(\"FEED_EXPORT_BATCH_ITEM_COUNT\")\n rows = [{k: v for k, v in row.items() if v} for row in rows]\n data = await self.exported_data(items, settings)\n\n for batch in data[\"marshal\"]:\n got_batch = self._load_until_eof(batch, load_func=marshal.load)\n expected_batch, rows = rows[:batch_size], rows[batch_size:]\n assert got_batch == expected_batch\n\n @coroutine_test\n async def test_export_items(self):\n \"\"\"Test partial deliveries in all supported formats\"\"\"\n items = [\n self.MyItem({\"foo\": \"bar1\", \"egg\": \"spam1\"}),\n self.MyItem({\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux2\"}),\n self.MyItem({\"foo\": \"bar3\", \"baz\": \"quux3\"}),\n ]\n rows = [\n {\"egg\": \"spam1\", 
\"foo\": \"bar1\", \"baz\": \"\"},\n {\"egg\": \"spam2\", \"foo\": \"bar2\", \"baz\": \"quux2\"},\n {\"foo\": \"bar3\", \"baz\": \"quux3\", \"egg\": \"\"},\n ]\n settings = {\"FEED_EXPORT_BATCH_ITEM_COUNT\": 2}\n header = self.MyItem.fields.keys()\n await self.assertExported(items, header, rows, settings=settings)\n\n def test_wrong_path(self):\n \"\"\"If path is without %(batch_time)s and %(batch_id) an exception must be raised\"\"\"\n settings = {\n \"FEEDS\": {\n self._random_temp_filename(): {\"format\": \"xml\"},\n },\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n }\n crawler = get_crawler(settings_dict=settings)\n with pytest.raises(NotConfigured):\n FeedExporter(crawler)\n\n @coroutine_test\n async def test_export_no_items_not_store_empty(self):\n for fmt in (\"json\", \"jsonlines\", \"xml\", \"csv\"):\n settings = {\n \"FEEDS\": {\n self._random_temp_filename() / fmt / self._file_mark: {\n \"format\": fmt\n },\n },\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n \"FEED_STORE_EMPTY\": False,\n }\n data = await self.exported_no_data(settings)\n data = dict(data)\n assert len(data[fmt]) == 0\n\n @coroutine_test\n async def test_export_no_items_store_empty(self):\n formats = (\n (\"json\", b\"[]\"),\n (\"jsonlines\", b\"\"),\n (\"xml\", b'<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n<items></items>'),\n (\"csv\", b\"\"),\n )\n\n for fmt, expctd in formats:\n settings = {\n \"FEEDS\": {\n self._random_temp_filename() / fmt / self._file_mark: {\n \"format\": fmt\n },\n },\n \"FEED_STORE_EMPTY\": True,\n \"FEED_EXPORT_INDENT\": None,\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n }\n data = await self.exported_no_data(settings)\n data = dict(data)\n assert data[fmt][0] == expctd\n\n @coroutine_test\n async def test_export_multiple_configs(self):\n items = [\n {\"foo\": \"FOO\", \"bar\": \"BAR\"},\n {\"foo\": \"FOO1\", \"bar\": \"BAR1\"},\n ]\n\n formats = {\n \"json\": [\n b'[\\n{\"bar\": \"BAR\"}\\n]',\n b'[\\n{\"bar\": \"BAR1\"}\\n]',\n ],\n \"xml\": [\n (\n b'<?xml 
version=\"1.0\" encoding=\"latin-1\"?>\\n'\n b\"<items>\\n <item>\\n <foo>FOO</foo>\\n </item>\\n</items>\"\n ),\n (\n b'<?xml version=\"1.0\" encoding=\"latin-1\"?>\\n'\n b\"<items>\\n <item>\\n <foo>FOO1</foo>\\n </item>\\n</items>\"\n ),\n ],\n \"csv\": [\n b\"foo,bar\\r\\nFOO,BAR\\r\\n\",\n b\"foo,bar\\r\\nFOO1,BAR1\\r\\n\",\n ],\n }\n\n settings = {\n \"FEEDS\": {\n self._random_temp_filename() / \"json\" / self._file_mark: {\n \"format\": \"json\",\n \"indent\": 0,\n \"fields\": [\"bar\"],\n \"encoding\": \"utf-8\",\n },\n self._random_temp_filename() / \"xml\" / self._file_mark: {\n \"format\": \"xml\",\n \"indent\": 2,\n \"fields\": [\"foo\"],\n \"encoding\": \"latin-1\",\n },\n self._random_temp_filename() / \"csv\" / self._file_mark: {\n \"format\": \"csv\",\n \"indent\": None,\n \"fields\": [\"foo\", \"bar\"],\n \"encoding\": \"utf-8\",\n },\n },\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n }\n data = await self.exported_data(items, settings)\n for fmt, expected in formats.items():\n for expected_batch, got_batch in zip(expected, data[fmt], strict=False):\n assert got_batch == expected_batch\n\n @coroutine_test\n async def test_batch_item_count_feeds_setting(self):\n items = [{\"foo\": \"FOO\"}, {\"foo\": \"FOO1\"}]\n formats = {\n \"json\": [\n b'[{\"foo\": \"FOO\"}]',\n b'[{\"foo\": \"FOO1\"}]',\n ],\n }\n settings = {\n \"FEEDS\": {\n self._random_temp_filename() / \"json\" / self._file_mark: {\n \"format\": \"json\",\n \"indent\": None,\n \"encoding\": \"utf-8\",\n \"batch_item_count\": 1,\n },\n },\n }\n data = await self.exported_data(items, settings)\n for fmt, expected in formats.items():\n for expected_batch, got_batch in zip(expected, data[fmt], strict=False):\n assert got_batch == expected_batch\n\n @coroutine_test\n async def test_batch_path_differ(self):\n \"\"\"\n Test that the name of all batch files differ from each other.\n So %(batch_id)d replaced with the current id.\n \"\"\"\n items = [\n self.MyItem({\"foo\": \"bar1\", \"egg\": 
\"spam1\"}),\n self.MyItem({\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux2\"}),\n self.MyItem({\"foo\": \"bar3\", \"baz\": \"quux3\"}),\n ]\n settings = {\n \"FEEDS\": {\n self._random_temp_filename() / \"%(batch_id)d\": {\n \"format\": \"json\",\n },\n },\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n }\n data = await self.exported_data(items, settings)\n assert len(items) == len(data[\"json\"])\n\n @inline_callbacks_test\n def test_stats_batch_file_success(self):\n settings = {\n \"FEEDS\": {\n build_url(\n str(self._random_temp_filename() / \"json\" / self._file_mark)\n ): {\n \"format\": \"json\",\n }\n },\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": 1,\n }\n crawler = get_crawler(ItemSpider, settings)\n yield crawler.crawl(total=2, mockserver=self.mockserver)\n assert \"feedexport/success_count/FileFeedStorage\" in crawler.stats.get_stats()\n assert crawler.stats.get_value(\"feedexport/success_count/FileFeedStorage\") == 12\n\n @pytest.mark.requires_boto3\n @inline_callbacks_test\n def test_s3_export(self):\n bucket = \"mybucket\"\n items = [\n self.MyItem({\"foo\": \"bar1\", \"egg\": \"spam1\"}),\n self.MyItem({\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux2\"}),\n self.MyItem({\"foo\": \"bar3\", \"baz\": \"quux3\"}),\n ]\n\n class CustomS3FeedStorage(S3FeedStorage):\n stubs = []\n\n def open(self, *args, **kwargs):\n from botocore import __version__ as botocore_version # noqa: PLC0415\n from botocore.stub import ANY, Stubber # noqa: PLC0415\n\n expected_params = {\n \"Body\": ANY,\n \"Bucket\": bucket,\n \"Key\": ANY,\n }\n if Version(botocore_version) >= Version(\"1.36.0\"):\n expected_params[\"ChecksumAlgorithm\"] = ANY\n\n stub = Stubber(self.s3_client)\n stub.activate()\n CustomS3FeedStorage.stubs.append(stub)\n stub.add_response(\n \"put_object\",\n expected_params=expected_params,\n service_response={},\n )\n return super().open(*args, **kwargs)\n\n key = \"export.csv\"\n uri = f\"s3://{bucket}/{key}/%(batch_id)d.json\"\n batch_item_count = 1\n 
settings = {\n \"AWS_ACCESS_KEY_ID\": \"access_key\",\n \"AWS_SECRET_ACCESS_KEY\": \"secret_key\",\n \"FEED_EXPORT_BATCH_ITEM_COUNT\": batch_item_count,\n \"FEED_STORAGES\": {\n \"s3\": CustomS3FeedStorage,\n },\n \"FEEDS\": {\n uri: {\n \"format\": \"json\",\n },\n },\n }\n crawler = get_crawler(settings_dict=settings)\n storage = S3FeedStorage.from_crawler(crawler, uri)\n verifyObject(IFeedStorage, storage)\n\n class TestSpider(scrapy.Spider):\n name = \"testspider\"\n\n def parse(self, response):\n yield from items\n\n TestSpider.start_urls = [self.mockserver.url(\"/\")]\n crawler = get_crawler(TestSpider, settings)\n yield crawler.crawl()\n\n assert len(CustomS3FeedStorage.stubs) == len(items)\n for stub in CustomS3FeedStorage.stubs[:-1]:\n stub.assert_no_pending_responses()\n assert (\n \"feedexport/success_count/CustomS3FeedStorage\" in crawler.stats.get_stats()\n )\n assert (\n crawler.stats.get_value(\"feedexport/success_count/CustomS3FeedStorage\") == 3\n )\n\n\n# Test that the FeedExporer sends the feed_exporter_closed and feed_slot_closed signals\nclass TestFeedExporterSignals:\n items = [\n {\"foo\": \"bar1\", \"egg\": \"spam1\"},\n {\"foo\": \"bar2\", \"egg\": \"spam2\", \"baz\": \"quux2\"},\n {\"foo\": \"bar3\", \"baz\": \"quux3\"},\n ]\n\n with tempfile.NamedTemporaryFile(suffix=\"json\") as tmp:\n settings = {\n \"FEEDS\": {\n f\"file:///{tmp.name}\": {\n \"format\": \"json\",\n },\n },\n }\n\n def feed_exporter_closed_signal_handler(self):\n self.feed_exporter_closed_received = True\n\n def feed_slot_closed_signal_handler(self, slot):\n self.feed_slot_closed_received = True\n\n async def feed_exporter_closed_signal_handler_async(self):\n self.feed_exporter_closed_received = True\n\n async def feed_slot_closed_signal_handler_async(self, slot):\n self.feed_slot_closed_received = True\n\n async def run_signaled_feed_exporter(\n self, feed_exporter_signal_handler: Callable, feed_slot_signal_handler: Callable\n ) -> None:\n crawler = 
get_crawler(settings_dict=self.settings)\n feed_exporter = FeedExporter.from_crawler(crawler)\n spider = scrapy.Spider(\"default\")\n spider.crawler = crawler\n crawler.signals.connect(\n feed_exporter_signal_handler,\n signal=signals.feed_exporter_closed,\n )\n crawler.signals.connect(\n feed_slot_signal_handler, signal=signals.feed_slot_closed\n )\n feed_exporter.open_spider(spider)\n for item in self.items:\n feed_exporter.item_scraped(item, spider)\n await feed_exporter.close_spider(spider)\n\n @coroutine_test\n async def test_feed_exporter_signals_sent(self) -> None:\n self.feed_exporter_closed_received = False\n self.feed_slot_closed_received = False\n\n await self.run_signaled_feed_exporter(\n self.feed_exporter_closed_signal_handler,\n self.feed_slot_closed_signal_handler,\n )\n assert self.feed_slot_closed_received\n assert self.feed_exporter_closed_received\n\n @coroutine_test\n async def test_feed_exporter_signals_sent_async(self) -> None:\n self.feed_exporter_closed_received = False\n self.feed_slot_closed_received = False\n\n await self.run_signaled_feed_exporter(\n self.feed_exporter_closed_signal_handler_async,\n self.feed_slot_closed_signal_handler_async,\n )\n assert self.feed_slot_closed_received\n assert self.feed_exporter_closed_received\n\n\nclass TestFeedExportInit:\n def test_unsupported_storage(self):\n settings = {\n \"FEEDS\": {\n \"unsupported://uri\": {},\n },\n }\n crawler = get_crawler(settings_dict=settings)\n with pytest.raises(NotConfigured):\n FeedExporter.from_crawler(crawler)\n\n def test_unsupported_format(self):\n settings = {\n \"FEEDS\": {\n \"file://path\": {\n \"format\": \"unsupported_format\",\n },\n },\n }\n crawler = get_crawler(settings_dict=settings)\n with pytest.raises(NotConfigured):\n FeedExporter.from_crawler(crawler)\n\n def test_absolute_pathlib_as_uri(self):\n with tempfile.NamedTemporaryFile(suffix=\"json\") as tmp:\n settings = {\n \"FEEDS\": {\n Path(tmp.name).resolve(): {\n \"format\": \"json\",\n },\n 
},\n }\n crawler = get_crawler(settings_dict=settings)\n exporter = FeedExporter.from_crawler(crawler)\n assert isinstance(exporter, FeedExporter)\n\n def test_relative_pathlib_as_uri(self):\n settings = {\n \"FEEDS\": {\n Path(\"./items.json\"): {\n \"format\": \"json\",\n },\n },\n }\n crawler = get_crawler(settings_dict=settings)\n exporter = FeedExporter.from_crawler(crawler)\n assert isinstance(exporter, FeedExporter)\n\n\nclass TestURIParams(ABC):\n spider_name = \"uri_params_spider\"\n deprecated_options = False\n\n @abstractmethod\n def build_settings(self, uri=\"file:///tmp/foobar\", uri_params=None):\n raise NotImplementedError\n\n def _crawler_feed_exporter(self, settings):\n if self.deprecated_options:\n with pytest.warns(\n ScrapyDeprecationWarning,\n match=\"The `FEED_URI` and `FEED_FORMAT` settings have been deprecated\",\n ):\n crawler = get_crawler(settings_dict=settings)\n else:\n crawler = get_crawler(settings_dict=settings)\n feed_exporter = crawler.get_extension(FeedExporter)\n return crawler, feed_exporter\n\n def test_default(self):\n settings = self.build_settings(\n uri=\"file:///tmp/%(name)s\",\n )\n crawler, feed_exporter = self._crawler_feed_exporter(settings)\n spider = scrapy.Spider(self.spider_name)\n spider.crawler = crawler\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", ScrapyDeprecationWarning)\n feed_exporter.open_spider(spider)\n\n assert feed_exporter.slots[0].uri == f\"file:///tmp/{self.spider_name}\"\n\n def test_none(self):\n def uri_params(params, spider):\n pass\n\n settings = self.build_settings(\n uri=\"file:///tmp/%(name)s\",\n uri_params=uri_params,\n )\n crawler, feed_exporter = self._crawler_feed_exporter(settings)\n spider = scrapy.Spider(self.spider_name)\n spider.crawler = crawler\n\n feed_exporter.open_spider(spider)\n\n assert feed_exporter.slots[0].uri == f\"file:///tmp/{self.spider_name}\"\n\n def test_empty_dict(self):\n def uri_params(params, spider):\n return {}\n\n settings = 
self.build_settings(\n uri=\"file:///tmp/%(name)s\",\n uri_params=uri_params,\n )\n crawler, feed_exporter = self._crawler_feed_exporter(settings)\n spider = scrapy.Spider(self.spider_name)\n spider.crawler = crawler\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", ScrapyDeprecationWarning)\n with pytest.raises(KeyError):\n feed_exporter.open_spider(spider)\n\n def test_params_as_is(self):\n def uri_params(params, spider):\n return params\n\n settings = self.build_settings(\n uri=\"file:///tmp/%(name)s\",\n uri_params=uri_params,\n )\n crawler, feed_exporter = self._crawler_feed_exporter(settings)\n spider = scrapy.Spider(self.spider_name)\n spider.crawler = crawler\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", ScrapyDeprecationWarning)\n feed_exporter.open_spider(spider)\n\n assert feed_exporter.slots[0].uri == f\"file:///tmp/{self.spider_name}\"\n\n def test_custom_param(self):\n def uri_params(params, spider):\n return {**params, \"foo\": self.spider_name}\n\n settings = self.build_settings(\n uri=\"file:///tmp/%(foo)s\",\n uri_params=uri_params,\n )\n crawler, feed_exporter = self._crawler_feed_exporter(settings)\n spider = scrapy.Spider(self.spider_name)\n spider.crawler = crawler\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", ScrapyDeprecationWarning)\n feed_exporter.open_spider(spider)\n\n assert feed_exporter.slots[0].uri == f\"file:///tmp/{self.spider_name}\"\n\n\nclass TestURIParamsSetting(TestURIParams):\n deprecated_options = True\n\n def build_settings(self, uri=\"file:///tmp/foobar\", uri_params=None):\n extra_settings = {}\n if uri_params:\n extra_settings[\"FEED_URI_PARAMS\"] = uri_params\n return {\n \"FEED_URI\": uri,\n **extra_settings,\n }\n\n\nclass TestURIParamsFeedOption(TestURIParams):\n deprecated_options = False\n\n def build_settings(self, uri=\"file:///tmp/foobar\", uri_params=None):\n options = {\n \"format\": \"jl\",\n }\n if uri_params:\n options[\"uri_params\"] = 
uri_params\n return {\n \"FEEDS\": {\n uri: options,\n },\n }\n", "framework": "pytest", "test_command": "pytest tests/test_feedexport.py::TestS3FeedStorage::test_store -xvs"}, {"test_file": "tests/test_command_check.py", "test_function": "TestCheckCommand.test_printSummary_with_unsuccessful_test_result_without_errors_and_without_failures", "test_content": "from __future__ import annotations\n\nimport sys\nfrom io import StringIO\nfrom typing import TYPE_CHECKING\nfrom unittest import TestCase\nfrom unittest.mock import Mock, PropertyMock, call, patch\n\nfrom scrapy.commands.check import Command, TextTestResult\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass DummyTestCase(TestCase):\n pass\n\n\nclass TestCheckCommand(TestProjectBase):\n spider_name = \"check_spider\"\n\n def _write_contract(self, proj_path: Path, contracts: str, parse_def: str) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"checkspider.py\"\n spider.write_text(\n f\"\"\"\nimport scrapy\n\nclass CheckSpider(scrapy.Spider):\n name = '{self.spider_name}'\n start_urls = ['data:,']\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response, **cb_kwargs):\n \\\"\\\"\\\"\n @url data:,\n {contracts}\n \\\"\\\"\\\"\n {parse_def}\n \"\"\",\n encoding=\"utf-8\",\n )\n\n def _test_contract(\n self, proj_path: Path, contracts: str = \"\", parse_def: str = \"pass\"\n ) -> None:\n self._write_contract(proj_path, contracts, parse_def)\n ret, out, err = proc(\"check\", cwd=proj_path)\n assert \"F\" not in out\n assert \"OK\" in err\n assert ret == 0\n\n def test_check_returns_requests_contract(self, proj_path: Path) -> None:\n contracts = \"\"\"\n @returns requests 1\n \"\"\"\n parse_def = \"\"\"\n yield scrapy.Request(url='http://next-url.com')\n \"\"\"\n self._test_contract(proj_path, contracts, parse_def)\n\n def test_check_returns_items_contract(self, proj_path: Path) 
-> None:\n contracts = \"\"\"\n @returns items 1\n \"\"\"\n parse_def = \"\"\"\n yield {'key1': 'val1', 'key2': 'val2'}\n \"\"\"\n self._test_contract(proj_path, contracts, parse_def)\n\n def test_check_cb_kwargs_contract(self, proj_path: Path) -> None:\n contracts = \"\"\"\n @cb_kwargs {\"arg1\": \"val1\", \"arg2\": \"val2\"}\n \"\"\"\n parse_def = \"\"\"\n if len(cb_kwargs.items()) == 0:\n raise Exception(\"Callback args not set\")\n \"\"\"\n self._test_contract(proj_path, contracts, parse_def)\n\n def test_check_scrapes_contract(self, proj_path: Path) -> None:\n contracts = \"\"\"\n @scrapes key1 key2\n \"\"\"\n parse_def = \"\"\"\n yield {'key1': 'val1', 'key2': 'val2'}\n \"\"\"\n self._test_contract(proj_path, contracts, parse_def)\n\n def test_check_all_default_contracts(self, proj_path: Path) -> None:\n contracts = \"\"\"\n @returns items 1\n @returns requests 1\n @scrapes key1 key2\n @cb_kwargs {\"arg1\": \"val1\", \"arg2\": \"val2\"}\n \"\"\"\n parse_def = \"\"\"\n yield {'key1': 'val1', 'key2': 'val2'}\n yield scrapy.Request(url='http://next-url.com')\n if len(cb_kwargs.items()) == 0:\n raise Exception(\"Callback args not set\")\n \"\"\"\n self._test_contract(proj_path, contracts, parse_def)\n\n def test_SCRAPY_CHECK_set(self, proj_path: Path) -> None:\n parse_def = \"\"\"\n import os\n if not os.environ.get('SCRAPY_CHECK'):\n raise Exception('SCRAPY_CHECK not set')\n \"\"\"\n self._test_contract(proj_path, parse_def=parse_def)\n\n def test_printSummary_with_unsuccessful_test_result_without_errors_and_without_failures(\n self,\n ) -> None:\n result = TextTestResult(Mock(), descriptions=False, verbosity=1)\n start_time = 1.0\n stop_time = 2.0\n result.testsRun = 5\n result.failures = []\n result.errors = []\n result.unexpectedSuccesses = [DummyTestCase(), DummyTestCase()]\n with patch.object(result.stream, \"write\") as mock_write:\n result.printSummary(start_time, stop_time)\n mock_write.assert_has_calls([call(\"FAILED\"), call(\"\\n\")])\n\n def 
test_printSummary_with_unsuccessful_test_result_with_only_failures(\n self,\n ) -> None:\n result = TextTestResult(Mock(), descriptions=False, verbosity=1)\n start_time = 1.0\n stop_time = 2.0\n result.testsRun = 5\n result.failures = [(DummyTestCase(), \"failure\")]\n result.errors = []\n with patch.object(result.stream, \"writeln\") as mock_write:\n result.printSummary(start_time, stop_time)\n mock_write.assert_called_with(\" (failures=1)\")\n\n def test_printSummary_with_unsuccessful_test_result_with_only_errors(self) -> None:\n result = TextTestResult(Mock(), descriptions=False, verbosity=1)\n start_time = 1.0\n stop_time = 2.0\n result.testsRun = 5\n result.failures = []\n result.errors = [(DummyTestCase(), \"error\")]\n with patch.object(result.stream, \"writeln\") as mock_write:\n result.printSummary(start_time, stop_time)\n mock_write.assert_called_with(\" (errors=1)\")\n\n def test_printSummary_with_unsuccessful_test_result_with_both_failures_and_errors(\n self,\n ) -> None:\n result = TextTestResult(Mock(), descriptions=False, verbosity=1)\n start_time = 1.0\n stop_time = 2.0\n result.testsRun = 5\n result.failures = [(DummyTestCase(), \"failure\")]\n result.errors = [(DummyTestCase(), \"error\")]\n with patch.object(result.stream, \"writeln\") as mock_write:\n result.printSummary(start_time, stop_time)\n mock_write.assert_called_with(\" (failures=1, errors=1)\")\n\n @patch(\"scrapy.commands.check.ContractsManager\")\n def test_run_with_opts_list_prints_spider(self, cm_cls_mock) -> None:\n output = StringIO()\n sys.stdout = output\n cmd = Command()\n cmd.settings = Mock(getwithbase=Mock(return_value={}))\n cm_cls_mock.return_value = cm_mock = Mock()\n spider_loader_mock = Mock()\n cmd.crawler_process = Mock(spider_loader=spider_loader_mock)\n spider_name = \"FakeSpider\"\n spider_cls_mock = Mock()\n type(spider_cls_mock).name = PropertyMock(return_value=spider_name)\n spider_loader_mock.load.side_effect = lambda x: {spider_name: spider_cls_mock}[\n x\n 
]\n tested_methods = [\"fakeMethod1\", \"fakeMethod2\"]\n cm_mock.tested_methods_from_spidercls.side_effect = lambda x: {\n spider_cls_mock: tested_methods\n }[x]\n\n cmd.run([spider_name], Mock(list=True))\n\n assert output.getvalue() == \"FakeSpider\\n * fakeMethod1\\n * fakeMethod2\\n\"\n sys.stdout = sys.__stdout__\n\n @patch(\"scrapy.commands.check.ContractsManager\")\n def test_run_without_opts_list_does_not_crawl_spider_with_no_tested_methods(\n self, cm_cls_mock\n ) -> None:\n cmd = Command()\n cmd.settings = Mock(getwithbase=Mock(return_value={}))\n cm_cls_mock.return_value = cm_mock = Mock()\n spider_loader_mock = Mock()\n cmd.crawler_process = Mock(spider_loader=spider_loader_mock)\n spider_name = \"FakeSpider\"\n spider_cls_mock = Mock()\n spider_loader_mock.load.side_effect = lambda x: {spider_name: spider_cls_mock}[\n x\n ]\n tested_methods: list[str] = []\n cm_mock.tested_methods_from_spidercls.side_effect = lambda x: {\n spider_cls_mock: tested_methods\n }[x]\n\n cmd.run([spider_name], Mock(list=False))\n\n cmd.crawler_process.crawl.assert_not_called()\n", "framework": "pytest", "test_command": "pytest tests/test_command_check.py::TestCheckCommand::test_printSummary_with_unsuccessful_test_result_without_errors_and_without_failures -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectCommand.test_startproject", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / 
\"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert 
call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n 
(\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n 
def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n 
destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectCommand::test_startproject -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectCommand.test_startproject_with_project_dir", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", 
\"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n 
permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See 
https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in 
existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectCommand::test_startproject_with_project_dir -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectCommand.test_existing_project_dir", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import 
IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", 
self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n 
template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n 
\"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = 
\"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectCommand::test_existing_project_dir -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectTemplates.test_startproject_permissions_from_read_only", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, 
cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in 
nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def 
test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n 
project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectTemplates::test_startproject_permissions_from_read_only -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectTemplates.test_startproject_permissions_unchanged_in_destination", "test_content": "from __future__ import 
annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n 
assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project 
'{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / 
\"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that 
causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectTemplates::test_startproject_permissions_unchanged_in_destination -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectTemplates.test_startproject_permissions_umask_022", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert (project_dir / project_name / \"pipelines.py\").exists()\n 
assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def get_permissions(path: Path) -> str:\n return 
oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n 
\"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template 
= Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n", "framework": "pytest", 
"test_command": "pytest tests/test_command_startproject.py::TestStartprojectTemplates::test_startproject_permissions_umask_022 -xvs"}] | {"repo_url": "https://github.com/scrapy/scrapy", "install_cmd": "pip install -e .", "commit_sha": "e02ad08672a5946f659acf4874c4a315e7886346", "frozen_requirements": "frozen_requirements/scrapy_scrapy.txt"} | {"body_lines": 8, "file_lines": 39, "has_docstring": false, "num_tests": 20} | {"status": "partial_pass", "note": "environment-specific test failures"} | repo_patch/0050 | clean | |
repo_patch/0034 | scrapy/scrapy | tests/utils/cmdline.py | proc | proc | function | null | from __future__ import annotations
import subprocess
import sys
from typing import Any
import pytest
from scrapy.utils.test import get_testenv
def call(*args: str, **popen_kwargs: Any) -> int:
args = (sys.executable, "-m", "scrapy.cmdline", *args)
return subprocess.call(
args,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
env=get_testenv(),
**popen_kwargs,
)
def proc(*args: str, **popen_kwargs: Any) -> tuple[int, str, str]:
# TODO: Implement this function | def proc(*args: str, **popen_kwargs: Any) -> tuple[int, str, str]: | args = (sys.executable, "-m", "scrapy.cmdline", *args)
try:
p = subprocess.run(
args,
check=False,
capture_output=True,
encoding="utf-8",
timeout=15,
env=get_testenv(),
**popen_kwargs,
)
except subprocess.TimeoutExpired:
pytest.fail("Command took too much time to complete")
return p.returncode, p.stdout, p.stderr | def proc(*args: str, **popen_kwargs: Any) -> tuple[int, str, str]:
args = (sys.executable, "-m", "scrapy.cmdline", *args)
try:
p = subprocess.run(
args,
check=False,
capture_output=True,
encoding="utf-8",
timeout=15,
env=get_testenv(),
**popen_kwargs,
)
except subprocess.TimeoutExpired:
pytest.fail("Command took too much time to complete")
return p.returncode, p.stdout, p.stderr | [{"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_spider_arguments", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass 
AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), 
dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n 
raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n 
\"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created 
successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_spider_arguments -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_request_with_meta", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import 
CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if 
getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def 
test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n 
self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, 
proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == 
self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_request_with_meta -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_request_with_cb_kwargs", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await 
asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n 
Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in 
stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n 
_, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n 
\"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: 
Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_request_with_cb_kwargs -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_request_without_meta", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport 
scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 
0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = 
{{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n 
self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: 
MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules 
found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n 
assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_request_without_meta -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_pipelines", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def 
parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), 
callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n 
cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, 
proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use 
it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), 
cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_pipelines -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_async_def_asyncio_parse_items_list", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path 
/ \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n 
\"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", 
encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, 
_, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def 
test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items 
-+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", 
\"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_async_def_asyncio_parse_items_list -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_async_def_asyncio_parse_items_single_element", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 
42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n 
custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n 
raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got 
response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def 
test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def 
test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_async_def_asyncio_parse_items_single_element -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_async_def_asyncgen_parse_loop", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass 
TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n 
from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass 
MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n 
\"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> 
None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule 
should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n 
formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_async_def_asyncgen_parse_loop -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_async_def_asyncgen_parse_exc", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n 
async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo 
== 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n 
\"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n 
cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# 
Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_async_def_asyncgen_parse_exc -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_async_def_asyncio_parse", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import 
Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass 
CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n 
)\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, 
stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def 
test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: 
Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = 
Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_async_def_asyncio_parse -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_parse_items", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = 
\"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n 
foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n 
\"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: 
MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n 
\"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n 
mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_parse_items -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_parse_items_no_callback_passed", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom 
tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 
5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return 
[scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n 
self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i 
in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def 
test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def 
test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_parse_items_no_callback_passed -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_wrong_callback_passed", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return 
[{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n 
else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n 
\"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def 
test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) 
-> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n 
\"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_wrong_callback_passed -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_crawlspider_matching_rule_callback_set", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import 
Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in 
range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), 
callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: 
(response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n 
mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": 
\"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_crawlspider_matching_rule_callback_set -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_crawlspider_matching_rule_default_callback", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status 
= await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, 
response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: 
Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n 
assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, 
{'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The 
requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_crawlspider_matching_rule_default_callback -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_spider_with_no_rules_attribute", "test_content": "from __future__ import annotations\n\nimport 
argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response 
{{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 
'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: 
MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 
'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n 
mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_spider_with_no_rules_attribute -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_crawlspider_missing_callback", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass 
AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 
'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n 
\"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, 
stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: 
MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# 
Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_crawlspider_missing_callback -xvs"}, {"test_file": "tests/test_command_parse.py", 
"test_function": "TestParseCommand.test_crawlspider_no_matching_rule", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n 
await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass 
MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n 
self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in 
range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def 
test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = 
\"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest tests/test_command_parse.py::TestParseCommand::test_crawlspider_no_matching_rule -xvs"}, {"test_file": "tests/test_command_parse.py", "test_function": "TestParseCommand.test_output_flag", "test_content": "from __future__ import annotations\n\nimport argparse\nimport re\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom scrapy.commands import parse\nfrom scrapy.settings import Settings\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n from tests.mockserver.http import MockServer\n\n\nclass TestParseCommand(TestProjectBase):\n spider_name = \"parse_spider\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"myspider.py\").write_text(\n f\"\"\"\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.utils.test import get_from_asyncio_queue\nimport 
asyncio\n\n\nclass BaseSpider(scrapy.Spider):\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n\nclass AsyncDefAsyncioReturnSpider(BaseSpider):\n name = \"asyncdef_asyncio_return\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return [{{'id': 1}}, {{'id': 2}}]\n\nclass AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):\n name = \"asyncdef_asyncio_return_single_element\"\n\n async def parse(self, response):\n await asyncio.sleep(0.1)\n status = await get_from_asyncio_queue(response.status)\n self.logger.info(f\"Got response {{status}}\")\n return {{'foo': 42}}\n\nclass AsyncDefAsyncioGenLoopSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_loop\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n self.logger.info(f\"Got response {{response.status}}\")\n\nclass AsyncDefAsyncioSpider(BaseSpider):\n name = \"asyncdef_asyncio\"\n\n async def parse(self, response):\n await asyncio.sleep(0.2)\n status = await get_from_asyncio_queue(response.status)\n self.logger.debug(f\"Got response {{status}}\")\n\nclass AsyncDefAsyncioGenExcSpider(BaseSpider):\n name = \"asyncdef_asyncio_gen_exc\"\n\n async def parse(self, response):\n for i in range(10):\n await asyncio.sleep(0.1)\n yield {{'foo': i}}\n if i > 5:\n raise ValueError(\"Stopping the processing\")\n\nclass CallbackSignatureDownloaderMiddleware:\n def process_request(self, request, spider):\n from inspect import signature\n spider.logger.debug(f\"request.callback signature: {{signature(request.callback)}}\")\n\n\nclass MySpider(scrapy.Spider):\n name = '{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOADER_MIDDLEWARES\": {{\n CallbackSignatureDownloaderMiddleware: 0,\n }},\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n def parse(self, response):\n if getattr(self, 'test_arg', None):\n self.logger.debug('It Works!')\n return 
[scrapy.Item(), dict(foo='bar')]\n\n def parse_request_with_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Does Not Work :(')\n else:\n self.logger.debug('It Works!')\n\n def parse_request_with_cb_kwargs(self, response, foo=None, key=None):\n if foo == 'bar' and key == 'value':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\n def parse_request_without_meta(self, response):\n foo = response.meta.get('foo', 'bar')\n\n if foo == 'bar':\n self.logger.debug('It Works!')\n else:\n self.logger.debug('It Does Not Work :(')\n\nclass MyGoodCrawlSpider(CrawlSpider):\n name = 'goodcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=r'/text'), follow=True),\n )\n\n def parse_item(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\n def parse(self, response):\n return [scrapy.Item(), dict(nomatch='default')]\n\n\nclass MyBadCrawlSpider(CrawlSpider):\n '''Spider which doesn't define a parse_item callback while using it in a rule.'''\n name = 'badcrawl{self.spider_name}'\n\n custom_settings = {{\n \"DOWNLOAD_DELAY\": 0,\n }}\n\n rules = (\n Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),\n )\n\n def parse(self, response):\n return [scrapy.Item(), dict(foo='bar')]\n\"\"\",\n encoding=\"utf-8\",\n )\n\n (proj_mod_path / \"pipelines.py\").write_text(\n \"\"\"\nimport logging\n\nclass MyPipeline:\n component_name = 'my_pipeline'\n\n def process_item(self, item):\n logging.info('It Works!')\n return item\n\"\"\",\n encoding=\"utf-8\",\n )\n\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(\n f\"\"\"\nITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}\n\"\"\"\n )\n\n def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, 
stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-a\",\n \"test_arg=1\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:\n raw_json_string = '{\"foo\" : \"baz\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--meta\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-m\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_meta\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_request_with_cb_kwargs(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n raw_json_string = '{\"foo\" : \"bar\", \"key\": \"value\"}'\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--cbkwargs\",\n raw_json_string,\n \"-c\",\n \"parse_request_with_cb_kwargs\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n assert (\n \"DEBUG: request.callback signature: (response, foo=None, key=None)\"\n in stderr\n )\n\n def test_request_without_meta(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse_request_without_meta\",\n \"--nolinks\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: It Works!\" in stderr\n\n def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"--pipelines\",\n \"-c\",\n \"parse\",\n \"--verbose\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: It Works!\" in stderr\n\n 
def test_async_def_asyncio_parse_items_list(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'id': 1}\" in out\n assert \"{'id': 2}\" in out\n\n def test_async_def_asyncio_parse_items_single_element(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_return_single_element\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n assert \"{'foo': 42}\" in out\n\n def test_async_def_asyncgen_parse_loop(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_loop\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"INFO: Got response 200\" in stderr\n for i in range(10):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncgen_parse_exc(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio_gen_exc\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"ValueError\" in stderr\n for i in range(7):\n assert f\"{{'foo': {i}}}\" in out\n\n def test_async_def_asyncio_parse(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, _, stderr = proc(\n \"parse\",\n \"--spider\",\n \"asyncdef_asyncio\",\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"DEBUG: Got response 200\" in stderr\n\n def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, 
{'foo': 'bar'}]\" in out\n\n def test_parse_items_no_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_wrong_callback_passed(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"dummy\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find callback\" in stderr\n\n def test_crawlspider_matching_rule_callback_set(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule matches the URL, use it's defined callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert \"[{}, {'foo': 'bar'}]\" in out\n\n def test_crawlspider_matching_rule_default_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"If a rule match but it has no callback set, use the 'parse' callback.\"\"\"\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n \"goodcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/text\"),\n cwd=proj_path,\n )\n assert \"[{}, {'nomatch': 'default'}]\" in out\n\n def test_spider_with_no_rules_attribute(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"Using -r with a spider with no rule should not produce items.\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"No CrawlSpider rules found\" in stderr\n\n def test_crawlspider_missing_callback(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n _, out, _ = proc(\n \"parse\",\n \"--spider\",\n 
\"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n\n def test_crawlspider_no_matching_rule(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n \"\"\"The requested URL has no matching rule, so no items should be scraped\"\"\"\n _, out, stderr = proc(\n \"parse\",\n \"--spider\",\n \"badcrawl\" + self.spider_name,\n \"-r\",\n mockserver.url(\"/enc-gb18030\"),\n cwd=proj_path,\n )\n assert re.search(r\"# Scraped Items -+\\r?\\n\\[\\]\", out)\n assert \"Cannot find a rule that matches\" in stderr\n\n def test_crawlspider_not_exists_with_not_matched_url(\n self, proj_path: Path, mockserver: MockServer\n ) -> None:\n assert call(\"parse\", mockserver.url(\"/invalid_url\"), cwd=proj_path) == 0\n\n def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:\n \"\"\"Checks if a file was created successfully having\n correct format containing correct data in it.\n \"\"\"\n file_name = \"data.json\"\n file_path = proj_path / file_name\n proc(\n \"parse\",\n \"--spider\",\n self.spider_name,\n \"-c\",\n \"parse\",\n \"-o\",\n file_name,\n mockserver.url(\"/html\"),\n cwd=proj_path,\n )\n\n assert file_path.exists()\n assert file_path.is_file()\n\n content = '[\\n{},\\n{\"foo\": \"bar\"}\\n]'\n assert file_path.read_text(encoding=\"utf-8\") == content\n\n def test_parse_add_options(self):\n command = parse.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n prog=\"scrapy\",\n formatter_class=argparse.HelpFormatter,\n conflict_handler=\"resolve\",\n prefix_chars=\"-\",\n )\n command.add_options(parser)\n namespace = parser.parse_args(\n [\"--verbose\", \"--nolinks\", \"-d\", \"2\", \"--spider\", self.spider_name]\n )\n assert namespace.nolinks\n assert namespace.depth == 2\n assert namespace.spider == self.spider_name\n assert namespace.verbose\n", "framework": "pytest", "test_command": "pytest 
tests/test_command_parse.py::TestParseCommand::test_output_flag -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_template", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. 
spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", 
encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", 
\"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = 
file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_template -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderCommand.test_same_filename_as_existing_spider", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. 
spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert 
call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n 
(\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = \"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, 
\"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderCommand::test_same_filename_as_existing_spider -xvs"}, {"test_file": "tests/test_command_genspider.py", "test_function": "TestGenspiderStandaloneCommand.test_same_name_as_existing_file", "test_content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\n\nimport pytest\n\nfrom tests.test_commands import TestProjectBase\nfrom tests.utils.cmdline import call, proc\n\n\ndef find_in_file(filename: Path, regex: str) -> re.Match | None:\n \"\"\"Find first pattern occurrence in file\"\"\"\n pattern = re.compile(regex)\n with filename.open(\"r\", encoding=\"utf-8\") as f:\n for line in f:\n match = pattern.search(line)\n if match is not None:\n return match\n return None\n\n\nclass TestGenspiderCommand(TestProjectBase):\n def test_arguments(self, 
proj_path: Path) -> None:\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n # only pass one argument. spider script shouldn't be created\n assert call(\"genspider\", \"test_name\", cwd=proj_path) == 2\n assert not spider.exists()\n # pass two arguments <name> <domain>. spider script should be created\n assert call(\"genspider\", \"test_name\", \"test.com\", cwd=proj_path) == 0\n assert spider.exists()\n\n @pytest.mark.parametrize(\n \"tplname\",\n [\n \"basic\",\n \"crawl\",\n \"xmlfeed\",\n \"csvfeed\",\n ],\n )\n def test_template(self, tplname: str, proj_path: Path) -> None:\n args = [f\"--template={tplname}\"] if tplname else []\n spname = \"test_spider\"\n spmodule = f\"{self.project_name}.spiders.{spname}\"\n spfile = proj_path / self.project_name / \"spiders\" / f\"{spname}.py\"\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert (\n f\"Created spider {spname!r} using template {tplname!r} in module:\\n {spmodule}\"\n in out\n )\n assert spfile.exists()\n modify_time_before = spfile.stat().st_mtime\n _, out, _ = proc(\"genspider\", spname, \"test.com\", *args, cwd=proj_path)\n assert f\"Spider {spname!r} already exists in module\" in out\n modify_time_after = spfile.stat().st_mtime\n assert modify_time_after == modify_time_before\n\n def test_list(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--list\", cwd=proj_path) == 0\n\n def test_dump(self, proj_path: Path) -> None:\n assert call(\"genspider\", \"--dump=basic\", cwd=proj_path) == 0\n assert call(\"genspider\", \"-d\", \"basic\", cwd=proj_path) == 0\n\n def test_same_name_as_project(self, proj_path: Path) -> None:\n assert call(\"genspider\", self.project_name, cwd=proj_path) == 2\n assert not (\n proj_path / self.project_name / \"spiders\" / f\"{self.project_name}.py\"\n ).exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_filename_as_existing_spider(\n self, force: bool, proj_path: Path\n ) -> 
None:\n file_name = \"example\"\n file_path = proj_path / self.project_name / \"spiders\" / f\"{file_name}.py\"\n assert call(\"genspider\", file_name, \"example.com\", cwd=proj_path) == 0\n assert file_path.exists()\n\n # change name of spider but not its file name\n with file_path.open(\"r+\", encoding=\"utf-8\") as spider_file:\n file_data = spider_file.read()\n file_data = file_data.replace('name = \"example\"', 'name = \"renamed\"')\n spider_file.seek(0)\n spider_file.write(file_data)\n spider_file.truncate()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_data\n\n if force:\n _, out, _ = proc(\n \"genspider\", \"--force\", file_name, \"example.com\", cwd=proj_path\n )\n assert (\n f\"Created spider {file_name!r} using template 'basic' in module\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=proj_path)\n assert f\"{file_path.resolve()} already exists\" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n\n @pytest.mark.parametrize(\n (\"url\", \"domain\"),\n [\n (\"test.com\", \"test.com\"),\n (\"https://test.com\", \"test.com\"),\n ],\n )\n def test_url(self, url: str, domain: str, proj_path: Path) -> None:\n assert call(\"genspider\", \"--force\", \"test_name\", url, cwd=proj_path) == 0\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"allowed_domains\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == domain\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == 
f\"https://{domain}\"\n\n @pytest.mark.parametrize(\n (\"url\", \"expected\", \"template\"),\n [\n # basic\n (\"https://test.com\", \"https://test.com\", \"basic\"),\n (\"http://test.com\", \"http://test.com\", \"basic\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"basic\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"basic\"),\n # crawl\n (\"https://test.com\", \"https://test.com\", \"crawl\"),\n (\"http://test.com\", \"http://test.com\", \"crawl\"),\n (\"http://test.com/other/path\", \"http://test.com/other/path\", \"crawl\"),\n (\"test.com/other/path\", \"https://test.com/other/path\", \"crawl\"),\n (\"test.com\", \"https://test.com\", \"crawl\"),\n # xmlfeed\n (\"https://test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"xmlfeed\"),\n (\"test.com/feed.xml\", \"https://test.com/feed.xml\", \"xmlfeed\"),\n # csvfeed\n (\"https://test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n (\"http://test.com/feed.xml\", \"http://test.com/feed.xml\", \"csvfeed\"),\n (\"test.com/feed.csv\", \"https://test.com/feed.csv\", \"csvfeed\"),\n ],\n )\n def test_template_start_urls(\n self, url: str, expected: str, template: str, proj_path: Path\n ) -> None:\n assert (\n call(\n \"genspider\", \"-t\", template, \"--force\", \"test_name\", url, cwd=proj_path\n )\n == 0\n )\n spider = proj_path / self.project_name / \"spiders\" / \"test_name.py\"\n m = find_in_file(spider, r\"start_urls\\s*=\\s*\\[['\\\"](.+)['\\\"]\\]\")\n assert m is not None\n assert m.group(1) == expected\n\n\nclass TestGenspiderStandaloneCommand:\n def test_generate_standalone_spider(self, tmp_path: Path) -> None:\n call(\"genspider\", \"example\", \"example.com\", cwd=tmp_path)\n assert Path(tmp_path, \"example.py\").exists()\n\n @pytest.mark.parametrize(\"force\", [True, False])\n def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:\n file_name = 
\"example\"\n file_path = Path(tmp_path, file_name + \".py\")\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert f\"Created spider {file_name!r} using template 'basic' \" in out\n assert file_path.exists()\n modify_time_before = file_path.stat().st_mtime\n file_contents_before = file_path.read_text(encoding=\"utf-8\")\n\n if force:\n # use different template to ensure contents were changed\n _, out, _ = proc(\n \"genspider\",\n \"--force\",\n \"-t\",\n \"crawl\",\n file_name,\n \"example.com\",\n cwd=tmp_path,\n )\n assert f\"Created spider {file_name!r} using template 'crawl' \" in out\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after != modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after != file_contents_before\n else:\n _, out, _ = proc(\"genspider\", file_name, \"example.com\", cwd=tmp_path)\n assert (\n f\"{Path(tmp_path, file_name + '.py').resolve()} already exists\" in out\n )\n modify_time_after = file_path.stat().st_mtime\n assert modify_time_after == modify_time_before\n file_contents_after = file_path.read_text(encoding=\"utf-8\")\n assert file_contents_after == file_contents_before\n", "framework": "pytest", "test_command": "pytest tests/test_command_genspider.py::TestGenspiderStandaloneCommand::test_same_name_as_existing_file -xvs"}, {"test_file": "tests/test_command_runspider.py", "test_function": "TestRunSpiderCommand.test_runspider_file_not_found", "test_content": "from __future__ import annotations\n\nimport asyncio\nimport inspect\nimport platform\nimport sys\nfrom typing import TYPE_CHECKING\n\nimport pytest\n\nfrom tests.test_crawler import ExceptionSpider, NoRequestsSpider\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n from pathlib import Path\n\n\nclass TestRunSpiderCommand:\n spider_filename = \"myspider.py\"\n\n debug_log_spider = \"\"\"\nimport scrapy\n\nclass 
MySpider(scrapy.Spider):\n name = 'myspider'\n\n async def start(self):\n self.logger.debug(\"It Works!\")\n return\n yield\n\"\"\"\n\n badspider = \"\"\"\nimport scrapy\n\nclass BadSpider(scrapy.Spider):\n name = \"bad\"\n async def start(self):\n raise Exception(\"oops!\")\n yield\n \"\"\"\n\n def runspider(\n self, cwd: Path, code: str, name: str | None = None, args: Iterable[str] = ()\n ) -> tuple[int, str, str]:\n fname = cwd / (name or self.spider_filename)\n fname.write_text(code, encoding=\"utf-8\")\n return proc(\"runspider\", str(fname), *args, cwd=cwd)\n\n def get_log(\n self, cwd: Path, code: str, name: str | None = None, args: Iterable[str] = ()\n ) -> str:\n _, _, stderr = self.runspider(cwd, code, name, args=args)\n return stderr\n\n def test_runspider(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, self.debug_log_spider)\n assert \"DEBUG: It Works!\" in log\n assert (\n \"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\n in log\n )\n assert \"INFO: Spider closed (finished)\" in log\n\n def test_run_fail_spider(self, tmp_path: Path) -> None:\n ret, _, _ = self.runspider(\n tmp_path, \"import scrapy\\n\" + inspect.getsource(ExceptionSpider)\n )\n assert ret != 0\n\n def test_run_good_spider(self, tmp_path: Path) -> None:\n ret, _, _ = self.runspider(\n tmp_path, \"import scrapy\\n\" + inspect.getsource(NoRequestsSpider)\n )\n assert ret == 0\n\n def test_runspider_log_level(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path, self.debug_log_spider, args=(\"-s\", \"LOG_LEVEL=INFO\")\n )\n assert \"DEBUG: It Works!\" not in log\n assert \"INFO: Spider opened\" in log\n\n def test_runspider_default_reactor(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path, self.debug_log_spider, args=(\"-s\", \"TWISTED_REACTOR=\")\n )\n assert \"DEBUG: It Works!\" in log\n assert (\n \"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\n not in log\n )\n assert \"INFO: Spider opened\" in 
log\n assert \"INFO: Closing spider (finished)\" in log\n assert \"INFO: Spider closed (finished)\" in log\n\n def test_runspider_dnscache_disabled(self, tmp_path: Path) -> None:\n # see https://github.com/scrapy/scrapy/issues/2811\n # The spider below should not be able to connect to localhost:12345,\n # which is intended,\n # but this should not be because of DNS lookup error\n # assumption: localhost will resolve in all cases (true?)\n dnscache_spider = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n start_urls = ['http://localhost:12345']\n\n custom_settings = {\n \"ROBOTSTXT_OBEY\": False,\n \"RETRY_ENABLED\": False,\n }\n\n def parse(self, response):\n return {'test': 'value'}\n\"\"\"\n log = self.get_log(\n tmp_path, dnscache_spider, args=(\"-s\", \"DNSCACHE_ENABLED=False\")\n )\n assert \"CannotResolveHostError\" not in log\n assert \"INFO: Spider opened\" in log\n\n @pytest.mark.parametrize(\"value\", [False, True])\n def test_runspider_log_short_names(self, tmp_path: Path, value: bool) -> None:\n log1 = self.get_log(\n tmp_path, self.debug_log_spider, args=(\"-s\", f\"LOG_SHORT_NAMES={value}\")\n )\n assert \"[myspider] DEBUG: It Works!\" in log1\n assert (\"[scrapy]\" in log1) is value\n assert (\"[scrapy.core.engine]\" in log1) is not value\n\n def test_runspider_no_spider_found(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, \"from scrapy.spiders import Spider\\n\")\n assert \"No spider found in file\" in log\n\n def test_runspider_file_not_found(self) -> None:\n _, _, log = proc(\"runspider\", \"some_non_existent_file\")\n assert \"File not found: some_non_existent_file\" in log\n\n def test_runspider_unable_to_load(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, \"\", name=\"myspider.txt\")\n assert \"Unable to load\" in log\n\n def test_start_errors(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, self.badspider, name=\"badspider.py\")\n assert \"start\" in log\n assert 
\"badspider.py\" in log, log\n\n def test_asyncio_enabled_true(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path,\n self.debug_log_spider,\n args=[\n \"-s\",\n \"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor\",\n ],\n )\n assert (\n \"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\n in log\n )\n\n def test_asyncio_enabled_default(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, self.debug_log_spider)\n assert (\n \"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\n in log\n )\n\n def test_asyncio_enabled_false(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path,\n self.debug_log_spider,\n args=[\"-s\", \"TWISTED_REACTOR=twisted.internet.selectreactor.SelectReactor\"],\n )\n assert \"Using reactor: twisted.internet.selectreactor.SelectReactor\" in log\n assert (\n \"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor\"\n not in log\n )\n\n @pytest.mark.requires_uvloop\n def test_custom_asyncio_loop_enabled_true(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path,\n self.debug_log_spider,\n args=[\n \"-s\",\n \"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor\",\n \"-s\",\n \"ASYNCIO_EVENT_LOOP=uvloop.Loop\",\n ],\n )\n assert \"Using asyncio event loop: uvloop.Loop\" in log\n\n def test_custom_asyncio_loop_enabled_false(self, tmp_path: Path) -> None:\n log = self.get_log(\n tmp_path,\n self.debug_log_spider,\n args=[\n \"-s\",\n \"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor\",\n ],\n )\n if sys.platform != \"win32\":\n loop = asyncio.new_event_loop()\n else:\n loop = asyncio.SelectorEventLoop()\n assert (\n f\"Using asyncio event loop: {loop.__module__}.{loop.__class__.__name__}\"\n in log\n )\n\n def test_output(self, tmp_path: Path) -> None:\n spider_code = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n async def start(self):\n 
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))\n return\n yield\n\"\"\"\n args = [\"-o\", \"example.json\"]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert \"[myspider] DEBUG: FEEDS: {'example.json': {'format': 'json'}}\" in log\n\n def test_overwrite_output(self, tmp_path: Path) -> None:\n spider_code = \"\"\"\nimport json\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n async def start(self):\n self.logger.debug(\n 'FEEDS: {}'.format(\n json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)\n )\n )\n return\n yield\n\"\"\"\n (tmp_path / \"example.json\").write_text(\"not empty\", encoding=\"utf-8\")\n args = [\"-O\", \"example.json\"]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert (\n '[myspider] DEBUG: FEEDS: {\"example.json\": {\"format\": \"json\", \"overwrite\": true}}'\n in log\n )\n with (tmp_path / \"example.json\").open(encoding=\"utf-8\") as f2:\n first_line = f2.readline()\n assert first_line != \"not empty\"\n\n def test_output_and_overwrite_output(self, tmp_path: Path) -> None:\n spider_code = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n async def start(self):\n return\n yield\n\"\"\"\n args = [\"-o\", \"example1.json\", \"-O\", \"example2.json\"]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert (\n \"error: Please use only one of -o/--output and -O/--overwrite-output\" in log\n )\n\n def test_output_stdout(self, tmp_path: Path) -> None:\n spider_code = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n async def start(self):\n self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))\n return\n yield\n\"\"\"\n args = [\"-o\", \"-:json\"]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert \"[myspider] DEBUG: FEEDS: {'stdout:': {'format': 'json'}}\" in log\n\n @pytest.mark.parametrize(\"arg\", [\"output.json:json\", \"output.json\"])\n def test_absolute_path(self, 
tmp_path: Path, arg: str) -> None:\n spider_code = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n start_urls = [\"data:,\"]\n\n def parse(self, response):\n yield {\"hello\": \"world\"}\n \"\"\"\n\n args = [\"-o\", str(tmp_path / arg)]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert (\n f\"[scrapy.extensions.feedexport] INFO: Stored json feed (1 items) in: {tmp_path / 'output.json'}\"\n in log\n )\n\n def test_args_change_settings(self, tmp_path: Path) -> None:\n spider_code = \"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'myspider'\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = super().from_crawler(crawler, *args, **kwargs)\n spider.settings.set(\"FOO\", kwargs.get(\"foo\"))\n return spider\n\n async def start(self):\n self.logger.info(f\"The value of FOO is {self.settings.getint('FOO')}\")\n return\n yield\n\"\"\"\n args = [\"-a\", \"foo=42\"]\n log = self.get_log(tmp_path, spider_code, args=args)\n assert \"Spider closed (finished)\" in log\n assert \"The value of FOO is 42\" in log\n\n\n@pytest.mark.skipif(\n platform.system() != \"Windows\", reason=\"Windows required for .pyw files\"\n)\nclass TestWindowsRunSpiderCommand(TestRunSpiderCommand):\n spider_filename = \"myspider.pyw\"\n\n def test_start_errors(self, tmp_path: Path) -> None:\n log = self.get_log(tmp_path, self.badspider, name=\"badspider.pyw\")\n assert \"start\" in log\n assert \"badspider.pyw\" in log\n\n def test_runspider_unable_to_load(self, tmp_path: Path) -> None:\n pytest.skip(\"Already Tested in 'RunSpiderCommandTest'\")\n", "framework": "pytest", "test_command": "pytest tests/test_command_runspider.py::TestRunSpiderCommand::test_runspider_file_not_found -xvs"}, {"test_file": "tests/test_command_version.py", "test_function": "TestVersionCommand.test_output", "test_content": "import scrapy\nfrom tests.utils.cmdline import proc\n\n\nclass TestVersionCommand:\n def test_output(self) -> None:\n 
_, out, _ = proc(\"version\")\n assert out.strip() == f\"Scrapy {scrapy.__version__}\"\n\n def test_verbose_output(self) -> None:\n _, out, _ = proc(\"version\", \"-v\")\n headers = [line.partition(\":\")[0].strip() for line in out.strip().splitlines()]\n assert headers == [\n \"Scrapy\",\n \"lxml\",\n \"libxml2\",\n \"cssselect\",\n \"parsel\",\n \"w3lib\",\n \"Twisted\",\n \"Python\",\n \"pyOpenSSL\",\n \"cryptography\",\n \"Platform\",\n ]\n", "framework": "pytest", "test_command": "pytest tests/test_command_version.py::TestVersionCommand::test_output -xvs"}, {"test_file": "tests/test_command_version.py", "test_function": "TestVersionCommand.test_verbose_output", "test_content": "import scrapy\nfrom tests.utils.cmdline import proc\n\n\nclass TestVersionCommand:\n def test_output(self) -> None:\n _, out, _ = proc(\"version\")\n assert out.strip() == f\"Scrapy {scrapy.__version__}\"\n\n def test_verbose_output(self) -> None:\n _, out, _ = proc(\"version\", \"-v\")\n headers = [line.partition(\":\")[0].strip() for line in out.strip().splitlines()]\n assert headers == [\n \"Scrapy\",\n \"lxml\",\n \"libxml2\",\n \"cssselect\",\n \"parsel\",\n \"w3lib\",\n \"Twisted\",\n \"Python\",\n \"pyOpenSSL\",\n \"cryptography\",\n \"Platform\",\n ]\n", "framework": "pytest", "test_command": "pytest tests/test_command_version.py::TestVersionCommand::test_verbose_output -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_empty", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, 
out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = 
f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", 
f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_empty -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_response_body", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def 
test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, 
err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, 
env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_response_body -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_response_type_text", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, 
mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = 
f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_response_type_text 
-xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_response_type_html", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = 
proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert 
ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_response_type_html -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_response_selector_html", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom 
tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) 
-> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = 
mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_response_selector_html -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_response_encoding_gb18030", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) 
-> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables 
automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n 
env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_response_encoding_gb18030 -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_redirect", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = 
proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, 
out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", 
"framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_redirect -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_redirect_follow_302", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert 
out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = 
\"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_redirect_follow_302 -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_redirect_not_follow_302", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import 
PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n 
\"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, 
out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_redirect_not_follow_302 -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_fetch_redirect_follow_302", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", 
mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def 
test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, 
mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_fetch_redirect_follow_302 -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_fetch_redirect_not_follow_302", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in 
out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = 
mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n 
p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_fetch_redirect_not_follow_302 -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_request_replace", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == 
\"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, 
\"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_request_replace -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_scrapy_import", "test_content": "from __future__ import annotations\n\nimport os\nimport 
sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, 
mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n 
pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_scrapy_import -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_local_file", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", 
\"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err 
= proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n 
assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_local_file -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_local_nofile", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = 
proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled 
(302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n 
p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_local_nofile -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": "TestShellCommand.test_dns_failures", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", 
mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert 
ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_dns_failures -xvs"}, {"test_file": "tests/test_command_shell.py", "test_function": 
"TestShellCommand.test_shell_fetch_async", "test_content": "from __future__ import annotations\n\nimport os\nimport sys\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, cast\n\nimport pytest\nfrom pexpect.popen_spawn import PopenSpawn\n\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests import NON_EXISTING_RESOLVABLE, tests_datadir\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestShellCommand:\n def test_empty(self) -> None:\n _, out, _ = proc(\"shell\", \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_response_body(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"response.body\")\n assert \"Works\" in out\n\n def test_response_type_text(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/text\"), \"-c\", \"type(response)\")\n assert \"TextResponse\" in out\n\n def test_response_type_html(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", \"type(response)\")\n assert \"HtmlResponse\" in out\n\n def test_response_selector_html(self, mockserver: MockServer) -> None:\n xpath = \"response.xpath(\\\"//p[@class='one']/text()\\\").get()\"\n _, out, _ = proc(\"shell\", mockserver.url(\"/html\"), \"-c\", xpath)\n assert out.strip() == \"Works\"\n\n def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\", mockserver.url(\"/enc-gb18030\"), \"-c\", \"response.encoding\"\n )\n assert out.strip() == \"gb18030\"\n\n def test_redirect(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"shell\", mockserver.url(\"/redirect\"), \"-c\", \"response.url\")\n assert out.strip().endswith(\"/redirected\")\n\n def test_redirect_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n 
\"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"200\")\n\n def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\n \"shell\",\n \"--no-redirect\",\n mockserver.url(\"/redirect-no-meta-refresh\"),\n \"-c\",\n \"response.status\",\n )\n assert out.strip().endswith(\"302\")\n\n def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url)`` follows HTTP redirects by default.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}')\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Redirecting (302)\" in err\n assert \"Crawled (200)\" in err\n\n def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:\n \"\"\"Test that calling ``fetch(url, redirect=False)`` disables automatic redirects.\"\"\"\n url = mockserver.url(\"/redirect-no-meta-refresh\")\n code = f\"fetch('{url}', redirect=False)\"\n ret, out, err = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n assert \"Crawled (302)\" in err\n\n def test_request_replace(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch('{url}') or fetch(response.request.replace(method='POST'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_scrapy_import(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/text\")\n code = f\"fetch(scrapy.Request('{url}'))\"\n ret, out, _ = proc(\"shell\", \"-c\", code)\n assert ret == 0, out\n\n def test_local_file(self) -> None:\n filepath = Path(tests_datadir, \"test_site\", \"index.html\")\n _, out, _ = proc(\"shell\", str(filepath), \"-c\", \"item\")\n assert \"{}\" in out\n\n def test_local_nofile(self) -> None:\n filepath = \"file:///tests/sample_data/test_site/nothinghere.html\"\n ret, out, err = proc(\"shell\", filepath, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"No such file or directory\" in err\n\n 
def test_dns_failures(self, mockserver: MockServer) -> None:\n if NON_EXISTING_RESOLVABLE:\n pytest.skip(\"Non-existing hosts are resolvable\")\n url = \"www.somedomainthatdoesntexi.st\"\n ret, out, err = proc(\"shell\", url, \"-c\", \"item\")\n assert ret == 1, out or err\n assert \"CannotResolveHostError\" in err\n\n def test_shell_fetch_async(self, mockserver: MockServer) -> None:\n url = mockserver.url(\"/html\")\n code = f\"fetch('{url}')\"\n ret, _, err = proc(\n \"shell\", \"-c\", code, \"--set\", f\"TWISTED_REACTOR={_asyncio_reactor_path}\"\n )\n assert ret == 0, err\n assert \"RuntimeError: There is no current event loop in thread\" not in err\n\n\nclass TestInteractiveShell:\n def test_fetch(self, mockserver: MockServer) -> None:\n args = (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"shell\",\n )\n env = os.environ.copy()\n env[\"SCRAPY_PYTHON_SHELL\"] = \"python\"\n logfile = BytesIO()\n # https://github.com/python/typeshed/issues/14915\n p = PopenSpawn(args, env=cast(\"os._Environ\", env), timeout=5)\n p.logfile_read = logfile\n p.expect_exact(\"Available Scrapy objects\")\n p.sendline(f\"fetch('{mockserver.url('/')}')\")\n p.sendline(\"type(response)\")\n p.expect_exact(\"HtmlResponse\")\n p.sendeof()\n p.wait()\n logfile.seek(0)\n assert \"Traceback\" not in logfile.read().decode()\n", "framework": "pytest", "test_command": "pytest tests/test_command_shell.py::TestShellCommand::test_shell_fetch_async -xvs"}, {"test_file": "tests/test_commands.py", "test_function": "TestCommandCrawlerProcess.test_project_asyncio_spider_settings_select", "test_content": "from __future__ import annotations\n\nimport argparse\nimport json\nfrom io import StringIO\nfrom shutil import copytree\nfrom typing import TYPE_CHECKING\nfrom unittest import mock\n\nimport pytest\n\nimport scrapy\nfrom scrapy.cmdline import _pop_command_name, _print_unknown_command_msg\nfrom scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view\nfrom scrapy.settings import 
Settings\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass EmptyCommand(ScrapyCommand):\n def short_desc(self) -> str:\n return \"\"\n\n def run(self, args: list[str], opts: argparse.Namespace) -> None:\n pass\n\n\nclass TestCommandSettings:\n def setup_method(self):\n self.command = EmptyCommand()\n self.command.settings = Settings()\n self.parser = argparse.ArgumentParser(\n formatter_class=ScrapyHelpFormatter, conflict_handler=\"resolve\"\n )\n self.command.add_options(self.parser)\n\n def test_settings_json_string(self):\n feeds_json = '{\"data.json\": {\"format\": \"json\"}, \"data.xml\": {\"format\": \"xml\"}}'\n opts, args = self.parser.parse_known_args(\n args=[\"-s\", f\"FEEDS={feeds_json}\", \"spider.py\"]\n )\n self.command.process_options(args, opts)\n assert isinstance(self.command.settings[\"FEEDS\"], scrapy.settings.BaseSettings)\n assert dict(self.command.settings[\"FEEDS\"]) == json.loads(feeds_json)\n\n def test_help_formatter(self):\n formatter = ScrapyHelpFormatter(prog=\"scrapy\")\n part_strings = [\n \"usage: scrapy genspider [options] <name> <domain>\\n\\n\",\n \"\\n\",\n \"optional arguments:\\n\",\n \"\\n\",\n \"Global Options:\\n\",\n ]\n assert formatter._join_parts(part_strings) == (\n \"Usage\\n=====\\n scrapy genspider [options] <name> <domain>\\n\\n\\n\"\n \"Optional Arguments\\n==================\\n\\n\"\n \"Global Options\\n--------------\\n\"\n )\n\n\nclass TestProjectBase:\n \"\"\"A base class for tests that may need a Scrapy project.\"\"\"\n\n project_name = \"testproject\"\n\n @pytest.fixture(scope=\"session\")\n def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:\n \"\"\"Create a Scrapy project in a temporary directory and return its path.\n\n Used as a cache for ``proj_path``.\n \"\"\"\n tmp_path = tmp_path_factory.mktemp(\"proj\")\n call(\"startproject\", self.project_name, cwd=tmp_path)\n 
return tmp_path / self.project_name\n\n @pytest.fixture\n def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:\n \"\"\"Copy a pre-generated Scrapy project into a temporary directory and return its path.\"\"\"\n proj_path = tmp_path / self.project_name\n copytree(_proj_path_cached, proj_path)\n return proj_path\n\n\nclass TestCommandCrawlerProcess(TestProjectBase):\n \"\"\"Test that the command uses the expected kind of *CrawlerProcess\n and produces expected errors when needed.\"\"\"\n\n name = \"crawl\"\n NORMAL_MSG = \"Using CrawlerProcess\"\n ASYNC_MSG = \"Using AsyncCrawlerProcess\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"sp.py\").write_text(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'sp'\n\n custom_settings = {}\n\n async def start(self):\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n (proj_mod_path / \"spiders\" / \"aiosp.py\").write_text(\"\"\"\nimport asyncio\n\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'aiosp'\n\n custom_settings = {}\n\n async def start(self):\n await asyncio.sleep(0.01)\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n self._append_settings(proj_mod_path, \"LOG_LEVEL = 'DEBUG'\\n\")\n\n @staticmethod\n def _append_settings(proj_mod_path: Path, text: str) -> None:\n \"\"\"Add text to the end of the project settings.py.\"\"\"\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(text)\n\n @staticmethod\n def _replace_custom_settings(\n proj_mod_path: Path, spider_name: str, text: str\n ) -> None:\n \"\"\"Replace custom_settings in the given spider file with the given text.\"\"\"\n spider_path = proj_mod_path / \"spiders\" / f\"{spider_name}.py\"\n with spider_path.open(\"r+\", encoding=\"utf-8\") as f:\n content = f.read()\n content = content.replace(\n \"custom_settings = {}\", f\"custom_settings 
= {text}\"\n )\n f.seek(0)\n f.write(content)\n f.truncate()\n\n def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider works.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"It works!\" in err\n assert \"Spider closed (finished)\" in err\n\n def _assert_spider_asyncio_fail(\n self, msg: str, proj_path: Path, *args: str\n ) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider fails to use asyncio.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"no running event loop\" in err\n\n def test_project_settings(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project default settings (to the asyncio value).\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the asyncio value.\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(\n self.ASYNC_MSG,\n proj_path,\n spider,\n \"-s\",\n f\"TWISTED_REACTOR={_asyncio_reactor_path}\",\n )\n\n def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor explicitly is set via the project settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the empty value.\n\n CrawlerProcess, the default reactor, only the normal 
spider works.\"\"\"\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_settings_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value\n and via command line to the empty value. 
The command line value takes\n precedence so the spider settings don't matter.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value\n and via the spider settings to the asyncio value. CrawlerProcess is\n chosen based on the project settings, but the asyncio reactor is chosen\n based on the spider settings.\n\n CrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)\n\n def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value. 
AsyncCrawlerProcess\n is chosen based on the project settings, and the conflicting reactor\n setting in the spider settings causes an exception.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders produce a\n mismatched reactor exception.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n _, _, err = proc(self.name, spider, cwd=proj_path)\n assert self.ASYNC_MSG in err\n assert (\n \"The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)\"\n \" does not match the requested one\"\n \" (twisted.internet.selectreactor.SelectReactor)\"\n ) in err\n\n def test_project_asyncio_spider_settings_select_forced(\n self, proj_path: Path\n ) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value, CrawlerProcess is\n forced via the project settings. 
The reactor is chosen based on the\n spider settings.\n\n CrawlerProcess, the select reactor, only the normal spider works.\"\"\"\n self._append_settings(\n proj_path / self.project_name, \"FORCE_CRAWLER_PROCESS = True\\n\"\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, \"aiosp\")\n\n\nclass TestMiscCommands(TestProjectBase):\n def test_list(self, proj_path: Path) -> None:\n assert call(\"list\", cwd=proj_path) == 0\n\n def test_list_subdir(self, proj_path: Path) -> None:\n \"\"\"Test that commands work in a subdirectory of the project.\"\"\"\n subdir = proj_path / \"subdir\"\n subdir.mkdir(exist_ok=True)\n assert call(\"list\", cwd=subdir) == 0\n\n def test_command_not_found(self) -> None:\n na_msg = \"\"\"\nThe list command is not available from this location.\nThese commands are only available from within a project: check, crawl, edit, list, parse.\n\"\"\"\n not_found_msg = \"\"\"\nUnknown command: abc\n\"\"\"\n params = [\n (\"list\", False, na_msg),\n (\"abc\", False, not_found_msg),\n (\"abc\", True, not_found_msg),\n ]\n for cmdname, inproject, message in params:\n with mock.patch(\"sys.stdout\", new=StringIO()) as out:\n _print_unknown_command_msg(Settings(), cmdname, inproject)\n assert out.getvalue().strip() == message.strip()\n\n\nclass TestBenchCommand:\n def test_run(self) -> None:\n _, _, err = proc(\n \"bench\",\n \"-s\",\n \"LOGSTATS_INTERVAL=0.001\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.01\",\n )\n assert \"INFO: Crawled\" in err\n assert \"Unhandled Error\" not in err\n assert \"log_count/ERROR\" not in err\n\n\nclass TestViewCommand:\n def test_methods(self) -> None:\n command = view.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n 
prog=\"scrapy\",\n prefix_chars=\"-\",\n formatter_class=ScrapyHelpFormatter,\n conflict_handler=\"resolve\",\n )\n command.add_options(parser)\n assert command.short_desc() == \"Open URL in browser, as seen by Scrapy\"\n assert \"URL using the Scrapy downloader and show its\" in command.long_desc()\n\n\nclass TestHelpMessage(TestProjectBase):\n COMMANDS = [\n \"parse\",\n \"startproject\",\n \"view\",\n \"crawl\",\n \"edit\",\n \"list\",\n \"fetch\",\n \"settings\",\n \"shell\",\n \"runspider\",\n \"version\",\n \"genspider\",\n \"check\",\n \"bench\",\n ]\n\n def test_help_messages(self, proj_path: Path) -> None:\n for command in self.COMMANDS:\n _, out, _ = proc(command, \"-h\", cwd=proj_path)\n assert \"Usage\" in out\n\n\nclass TestPopCommandName:\n def test_valid_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"my_spider\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"my_spider\"]\n\n def test_no_command(self) -> None:\n argv = [\"scrapy\"]\n command = _pop_command_name(argv)\n assert command is None\n assert argv == [\"scrapy\"]\n\n def test_option_before_command(self) -> None:\n argv = [\"scrapy\", \"-h\", \"crawl\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n\n def test_option_after_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"-h\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n", "framework": "pytest", "test_command": "pytest tests/test_commands.py::TestCommandCrawlerProcess::test_project_asyncio_spider_settings_select -xvs"}, {"test_file": "tests/test_commands.py", "test_function": "TestBenchCommand.test_run", "test_content": "from __future__ import annotations\n\nimport argparse\nimport json\nfrom io import StringIO\nfrom shutil import copytree\nfrom typing import TYPE_CHECKING\nfrom unittest import mock\n\nimport pytest\n\nimport scrapy\nfrom scrapy.cmdline 
import _pop_command_name, _print_unknown_command_msg\nfrom scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view\nfrom scrapy.settings import Settings\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass EmptyCommand(ScrapyCommand):\n def short_desc(self) -> str:\n return \"\"\n\n def run(self, args: list[str], opts: argparse.Namespace) -> None:\n pass\n\n\nclass TestCommandSettings:\n def setup_method(self):\n self.command = EmptyCommand()\n self.command.settings = Settings()\n self.parser = argparse.ArgumentParser(\n formatter_class=ScrapyHelpFormatter, conflict_handler=\"resolve\"\n )\n self.command.add_options(self.parser)\n\n def test_settings_json_string(self):\n feeds_json = '{\"data.json\": {\"format\": \"json\"}, \"data.xml\": {\"format\": \"xml\"}}'\n opts, args = self.parser.parse_known_args(\n args=[\"-s\", f\"FEEDS={feeds_json}\", \"spider.py\"]\n )\n self.command.process_options(args, opts)\n assert isinstance(self.command.settings[\"FEEDS\"], scrapy.settings.BaseSettings)\n assert dict(self.command.settings[\"FEEDS\"]) == json.loads(feeds_json)\n\n def test_help_formatter(self):\n formatter = ScrapyHelpFormatter(prog=\"scrapy\")\n part_strings = [\n \"usage: scrapy genspider [options] <name> <domain>\\n\\n\",\n \"\\n\",\n \"optional arguments:\\n\",\n \"\\n\",\n \"Global Options:\\n\",\n ]\n assert formatter._join_parts(part_strings) == (\n \"Usage\\n=====\\n scrapy genspider [options] <name> <domain>\\n\\n\\n\"\n \"Optional Arguments\\n==================\\n\\n\"\n \"Global Options\\n--------------\\n\"\n )\n\n\nclass TestProjectBase:\n \"\"\"A base class for tests that may need a Scrapy project.\"\"\"\n\n project_name = \"testproject\"\n\n @pytest.fixture(scope=\"session\")\n def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:\n \"\"\"Create a Scrapy project in a temporary directory and return its path.\n\n 
Used as a cache for ``proj_path``.\n \"\"\"\n tmp_path = tmp_path_factory.mktemp(\"proj\")\n call(\"startproject\", self.project_name, cwd=tmp_path)\n return tmp_path / self.project_name\n\n @pytest.fixture\n def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:\n \"\"\"Copy a pre-generated Scrapy project into a temporary directory and return its path.\"\"\"\n proj_path = tmp_path / self.project_name\n copytree(_proj_path_cached, proj_path)\n return proj_path\n\n\nclass TestCommandCrawlerProcess(TestProjectBase):\n \"\"\"Test that the command uses the expected kind of *CrawlerProcess\n and produces expected errors when needed.\"\"\"\n\n name = \"crawl\"\n NORMAL_MSG = \"Using CrawlerProcess\"\n ASYNC_MSG = \"Using AsyncCrawlerProcess\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"sp.py\").write_text(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'sp'\n\n custom_settings = {}\n\n async def start(self):\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n (proj_mod_path / \"spiders\" / \"aiosp.py\").write_text(\"\"\"\nimport asyncio\n\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'aiosp'\n\n custom_settings = {}\n\n async def start(self):\n await asyncio.sleep(0.01)\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n self._append_settings(proj_mod_path, \"LOG_LEVEL = 'DEBUG'\\n\")\n\n @staticmethod\n def _append_settings(proj_mod_path: Path, text: str) -> None:\n \"\"\"Add text to the end of the project settings.py.\"\"\"\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(text)\n\n @staticmethod\n def _replace_custom_settings(\n proj_mod_path: Path, spider_name: str, text: str\n ) -> None:\n \"\"\"Replace custom_settings in the given spider file with the given text.\"\"\"\n spider_path = proj_mod_path / \"spiders\" / f\"{spider_name}.py\"\n 
with spider_path.open(\"r+\", encoding=\"utf-8\") as f:\n content = f.read()\n content = content.replace(\n \"custom_settings = {}\", f\"custom_settings = {text}\"\n )\n f.seek(0)\n f.write(content)\n f.truncate()\n\n def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider works.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"It works!\" in err\n assert \"Spider closed (finished)\" in err\n\n def _assert_spider_asyncio_fail(\n self, msg: str, proj_path: Path, *args: str\n ) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider fails to use asyncio.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"no running event loop\" in err\n\n def test_project_settings(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project default settings (to the asyncio value).\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the asyncio value.\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(\n self.ASYNC_MSG,\n proj_path,\n spider,\n \"-s\",\n f\"TWISTED_REACTOR={_asyncio_reactor_path}\",\n )\n\n def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor explicitly is set via the project settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_empty(self, 
proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_settings_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value\n and via command line to the empty value. 
The command line value takes\n precedence so the spider settings don't matter.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value\n and via the spider settings to the asyncio value. CrawlerProcess is\n chosen based on the project settings, but the asyncio reactor is chosen\n based on the spider settings.\n\n CrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)\n\n def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value. 
AsyncCrawlerProcess\n is chosen based on the project settings, and the conflicting reactor\n setting in the spider settings causes an exception.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders produce a\n mismatched reactor exception.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n _, _, err = proc(self.name, spider, cwd=proj_path)\n assert self.ASYNC_MSG in err\n assert (\n \"The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)\"\n \" does not match the requested one\"\n \" (twisted.internet.selectreactor.SelectReactor)\"\n ) in err\n\n def test_project_asyncio_spider_settings_select_forced(\n self, proj_path: Path\n ) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value, CrawlerProcess is\n forced via the project settings. 
The reactor is chosen based on the\n spider settings.\n\n CrawlerProcess, the select reactor, only the normal spider works.\"\"\"\n self._append_settings(\n proj_path / self.project_name, \"FORCE_CRAWLER_PROCESS = True\\n\"\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, \"aiosp\")\n\n\nclass TestMiscCommands(TestProjectBase):\n def test_list(self, proj_path: Path) -> None:\n assert call(\"list\", cwd=proj_path) == 0\n\n def test_list_subdir(self, proj_path: Path) -> None:\n \"\"\"Test that commands work in a subdirectory of the project.\"\"\"\n subdir = proj_path / \"subdir\"\n subdir.mkdir(exist_ok=True)\n assert call(\"list\", cwd=subdir) == 0\n\n def test_command_not_found(self) -> None:\n na_msg = \"\"\"\nThe list command is not available from this location.\nThese commands are only available from within a project: check, crawl, edit, list, parse.\n\"\"\"\n not_found_msg = \"\"\"\nUnknown command: abc\n\"\"\"\n params = [\n (\"list\", False, na_msg),\n (\"abc\", False, not_found_msg),\n (\"abc\", True, not_found_msg),\n ]\n for cmdname, inproject, message in params:\n with mock.patch(\"sys.stdout\", new=StringIO()) as out:\n _print_unknown_command_msg(Settings(), cmdname, inproject)\n assert out.getvalue().strip() == message.strip()\n\n\nclass TestBenchCommand:\n def test_run(self) -> None:\n _, _, err = proc(\n \"bench\",\n \"-s\",\n \"LOGSTATS_INTERVAL=0.001\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.01\",\n )\n assert \"INFO: Crawled\" in err\n assert \"Unhandled Error\" not in err\n assert \"log_count/ERROR\" not in err\n\n\nclass TestViewCommand:\n def test_methods(self) -> None:\n command = view.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n 
prog=\"scrapy\",\n prefix_chars=\"-\",\n formatter_class=ScrapyHelpFormatter,\n conflict_handler=\"resolve\",\n )\n command.add_options(parser)\n assert command.short_desc() == \"Open URL in browser, as seen by Scrapy\"\n assert \"URL using the Scrapy downloader and show its\" in command.long_desc()\n\n\nclass TestHelpMessage(TestProjectBase):\n COMMANDS = [\n \"parse\",\n \"startproject\",\n \"view\",\n \"crawl\",\n \"edit\",\n \"list\",\n \"fetch\",\n \"settings\",\n \"shell\",\n \"runspider\",\n \"version\",\n \"genspider\",\n \"check\",\n \"bench\",\n ]\n\n def test_help_messages(self, proj_path: Path) -> None:\n for command in self.COMMANDS:\n _, out, _ = proc(command, \"-h\", cwd=proj_path)\n assert \"Usage\" in out\n\n\nclass TestPopCommandName:\n def test_valid_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"my_spider\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"my_spider\"]\n\n def test_no_command(self) -> None:\n argv = [\"scrapy\"]\n command = _pop_command_name(argv)\n assert command is None\n assert argv == [\"scrapy\"]\n\n def test_option_before_command(self) -> None:\n argv = [\"scrapy\", \"-h\", \"crawl\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n\n def test_option_after_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"-h\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n", "framework": "pytest", "test_command": "pytest tests/test_commands.py::TestBenchCommand::test_run -xvs"}, {"test_file": "tests/test_commands.py", "test_function": "TestHelpMessage.test_help_messages", "test_content": "from __future__ import annotations\n\nimport argparse\nimport json\nfrom io import StringIO\nfrom shutil import copytree\nfrom typing import TYPE_CHECKING\nfrom unittest import mock\n\nimport pytest\n\nimport scrapy\nfrom scrapy.cmdline import _pop_command_name, 
_print_unknown_command_msg\nfrom scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view\nfrom scrapy.settings import Settings\nfrom scrapy.utils.reactor import _asyncio_reactor_path\nfrom tests.utils.cmdline import call, proc\n\nif TYPE_CHECKING:\n from pathlib import Path\n\n\nclass EmptyCommand(ScrapyCommand):\n def short_desc(self) -> str:\n return \"\"\n\n def run(self, args: list[str], opts: argparse.Namespace) -> None:\n pass\n\n\nclass TestCommandSettings:\n def setup_method(self):\n self.command = EmptyCommand()\n self.command.settings = Settings()\n self.parser = argparse.ArgumentParser(\n formatter_class=ScrapyHelpFormatter, conflict_handler=\"resolve\"\n )\n self.command.add_options(self.parser)\n\n def test_settings_json_string(self):\n feeds_json = '{\"data.json\": {\"format\": \"json\"}, \"data.xml\": {\"format\": \"xml\"}}'\n opts, args = self.parser.parse_known_args(\n args=[\"-s\", f\"FEEDS={feeds_json}\", \"spider.py\"]\n )\n self.command.process_options(args, opts)\n assert isinstance(self.command.settings[\"FEEDS\"], scrapy.settings.BaseSettings)\n assert dict(self.command.settings[\"FEEDS\"]) == json.loads(feeds_json)\n\n def test_help_formatter(self):\n formatter = ScrapyHelpFormatter(prog=\"scrapy\")\n part_strings = [\n \"usage: scrapy genspider [options] <name> <domain>\\n\\n\",\n \"\\n\",\n \"optional arguments:\\n\",\n \"\\n\",\n \"Global Options:\\n\",\n ]\n assert formatter._join_parts(part_strings) == (\n \"Usage\\n=====\\n scrapy genspider [options] <name> <domain>\\n\\n\\n\"\n \"Optional Arguments\\n==================\\n\\n\"\n \"Global Options\\n--------------\\n\"\n )\n\n\nclass TestProjectBase:\n \"\"\"A base class for tests that may need a Scrapy project.\"\"\"\n\n project_name = \"testproject\"\n\n @pytest.fixture(scope=\"session\")\n def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:\n \"\"\"Create a Scrapy project in a temporary directory and return its path.\n\n Used as a cache for 
``proj_path``.\n \"\"\"\n tmp_path = tmp_path_factory.mktemp(\"proj\")\n call(\"startproject\", self.project_name, cwd=tmp_path)\n return tmp_path / self.project_name\n\n @pytest.fixture\n def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:\n \"\"\"Copy a pre-generated Scrapy project into a temporary directory and return its path.\"\"\"\n proj_path = tmp_path / self.project_name\n copytree(_proj_path_cached, proj_path)\n return proj_path\n\n\nclass TestCommandCrawlerProcess(TestProjectBase):\n \"\"\"Test that the command uses the expected kind of *CrawlerProcess\n and produces expected errors when needed.\"\"\"\n\n name = \"crawl\"\n NORMAL_MSG = \"Using CrawlerProcess\"\n ASYNC_MSG = \"Using AsyncCrawlerProcess\"\n\n @pytest.fixture(autouse=True)\n def create_files(self, proj_path: Path) -> None:\n proj_mod_path = proj_path / self.project_name\n (proj_mod_path / \"spiders\" / \"sp.py\").write_text(\"\"\"\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'sp'\n\n custom_settings = {}\n\n async def start(self):\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n (proj_mod_path / \"spiders\" / \"aiosp.py\").write_text(\"\"\"\nimport asyncio\n\nimport scrapy\n\nclass MySpider(scrapy.Spider):\n name = 'aiosp'\n\n custom_settings = {}\n\n async def start(self):\n await asyncio.sleep(0.01)\n self.logger.debug('It works!')\n return\n yield\n\"\"\")\n\n self._append_settings(proj_mod_path, \"LOG_LEVEL = 'DEBUG'\\n\")\n\n @staticmethod\n def _append_settings(proj_mod_path: Path, text: str) -> None:\n \"\"\"Add text to the end of the project settings.py.\"\"\"\n with (proj_mod_path / \"settings.py\").open(\"a\", encoding=\"utf-8\") as f:\n f.write(text)\n\n @staticmethod\n def _replace_custom_settings(\n proj_mod_path: Path, spider_name: str, text: str\n ) -> None:\n \"\"\"Replace custom_settings in the given spider file with the given text.\"\"\"\n spider_path = proj_mod_path / \"spiders\" / f\"{spider_name}.py\"\n with 
spider_path.open(\"r+\", encoding=\"utf-8\") as f:\n content = f.read()\n content = content.replace(\n \"custom_settings = {}\", f\"custom_settings = {text}\"\n )\n f.seek(0)\n f.write(content)\n f.truncate()\n\n def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider works.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"It works!\" in err\n assert \"Spider closed (finished)\" in err\n\n def _assert_spider_asyncio_fail(\n self, msg: str, proj_path: Path, *args: str\n ) -> None:\n \"\"\"The command uses the expected *CrawlerProcess, the spider fails to use asyncio.\"\"\"\n _, _, err = proc(self.name, *args, cwd=proj_path)\n assert msg in err\n assert \"no running event loop\" in err\n\n def test_project_settings(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project default settings (to the asyncio value).\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the asyncio value.\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(\n self.ASYNC_MSG,\n proj_path,\n spider,\n \"-s\",\n f\"TWISTED_REACTOR={_asyncio_reactor_path}\",\n )\n\n def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor explicitly is set via the project settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n\n for spider in [\"sp\", \"aiosp\"]:\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_cmdline_empty(self, 
proj_path: Path) -> None:\n \"\"\"The reactor is set via the command line to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_settings_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)\n\n def test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the spider settings to the asyncio value\n and via command line to the empty value. 
The command line value takes\n precedence so the spider settings don't matter.\n\n CrawlerProcess, the default reactor, only the normal spider works.\"\"\"\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n\n self._assert_spider_works(\n self.NORMAL_MSG, proj_path, \"sp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n self._assert_spider_asyncio_fail(\n self.NORMAL_MSG, proj_path, \"aiosp\", \"-s\", \"TWISTED_REACTOR=\"\n )\n\n def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the empty value\n and via the spider settings to the asyncio value. CrawlerProcess is\n chosen based on the project settings, but the asyncio reactor is chosen\n based on the spider settings.\n\n CrawlerProcess, the asyncio reactor, both spiders work.\"\"\"\n self._append_settings(proj_path / self.project_name, \"TWISTED_REACTOR = None\\n\")\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n f\"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}\",\n )\n self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)\n\n def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value. 
AsyncCrawlerProcess\n is chosen based on the project settings, and the conflicting reactor\n setting in the spider settings causes an exception.\n\n AsyncCrawlerProcess, the asyncio reactor, both spiders produce a\n mismatched reactor exception.\"\"\"\n self._append_settings(\n proj_path / self.project_name,\n f\"TWISTED_REACTOR = '{_asyncio_reactor_path}'\\n\",\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n _, _, err = proc(self.name, spider, cwd=proj_path)\n assert self.ASYNC_MSG in err\n assert (\n \"The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)\"\n \" does not match the requested one\"\n \" (twisted.internet.selectreactor.SelectReactor)\"\n ) in err\n\n def test_project_asyncio_spider_settings_select_forced(\n self, proj_path: Path\n ) -> None:\n \"\"\"The reactor is set via the project settings to the asyncio value\n and via the spider settings to the select value, CrawlerProcess is\n forced via the project settings. 
The reactor is chosen based on the\n spider settings.\n\n CrawlerProcess, the select reactor, only the normal spider works.\"\"\"\n self._append_settings(\n proj_path / self.project_name, \"FORCE_CRAWLER_PROCESS = True\\n\"\n )\n for spider in [\"sp\", \"aiosp\"]:\n self._replace_custom_settings(\n proj_path / self.project_name,\n spider,\n \"{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}\",\n )\n\n self._assert_spider_works(self.NORMAL_MSG, proj_path, \"sp\")\n self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, \"aiosp\")\n\n\nclass TestMiscCommands(TestProjectBase):\n def test_list(self, proj_path: Path) -> None:\n assert call(\"list\", cwd=proj_path) == 0\n\n def test_list_subdir(self, proj_path: Path) -> None:\n \"\"\"Test that commands work in a subdirectory of the project.\"\"\"\n subdir = proj_path / \"subdir\"\n subdir.mkdir(exist_ok=True)\n assert call(\"list\", cwd=subdir) == 0\n\n def test_command_not_found(self) -> None:\n na_msg = \"\"\"\nThe list command is not available from this location.\nThese commands are only available from within a project: check, crawl, edit, list, parse.\n\"\"\"\n not_found_msg = \"\"\"\nUnknown command: abc\n\"\"\"\n params = [\n (\"list\", False, na_msg),\n (\"abc\", False, not_found_msg),\n (\"abc\", True, not_found_msg),\n ]\n for cmdname, inproject, message in params:\n with mock.patch(\"sys.stdout\", new=StringIO()) as out:\n _print_unknown_command_msg(Settings(), cmdname, inproject)\n assert out.getvalue().strip() == message.strip()\n\n\nclass TestBenchCommand:\n def test_run(self) -> None:\n _, _, err = proc(\n \"bench\",\n \"-s\",\n \"LOGSTATS_INTERVAL=0.001\",\n \"-s\",\n \"CLOSESPIDER_TIMEOUT=0.01\",\n )\n assert \"INFO: Crawled\" in err\n assert \"Unhandled Error\" not in err\n assert \"log_count/ERROR\" not in err\n\n\nclass TestViewCommand:\n def test_methods(self) -> None:\n command = view.Command()\n command.settings = Settings()\n parser = argparse.ArgumentParser(\n 
prog=\"scrapy\",\n prefix_chars=\"-\",\n formatter_class=ScrapyHelpFormatter,\n conflict_handler=\"resolve\",\n )\n command.add_options(parser)\n assert command.short_desc() == \"Open URL in browser, as seen by Scrapy\"\n assert \"URL using the Scrapy downloader and show its\" in command.long_desc()\n\n\nclass TestHelpMessage(TestProjectBase):\n COMMANDS = [\n \"parse\",\n \"startproject\",\n \"view\",\n \"crawl\",\n \"edit\",\n \"list\",\n \"fetch\",\n \"settings\",\n \"shell\",\n \"runspider\",\n \"version\",\n \"genspider\",\n \"check\",\n \"bench\",\n ]\n\n def test_help_messages(self, proj_path: Path) -> None:\n for command in self.COMMANDS:\n _, out, _ = proc(command, \"-h\", cwd=proj_path)\n assert \"Usage\" in out\n\n\nclass TestPopCommandName:\n def test_valid_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"my_spider\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"my_spider\"]\n\n def test_no_command(self) -> None:\n argv = [\"scrapy\"]\n command = _pop_command_name(argv)\n assert command is None\n assert argv == [\"scrapy\"]\n\n def test_option_before_command(self) -> None:\n argv = [\"scrapy\", \"-h\", \"crawl\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n\n def test_option_after_command(self) -> None:\n argv = [\"scrapy\", \"crawl\", \"-h\"]\n command = _pop_command_name(argv)\n assert command == \"crawl\"\n assert argv == [\"scrapy\", \"-h\"]\n", "framework": "pytest", "test_command": "pytest tests/test_commands.py::TestHelpMessage::test_help_messages -xvs"}, {"test_file": "tests/test_command_fetch.py", "test_function": "TestFetchCommand.test_output", "test_content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestFetchCommand:\n def test_output(self, mockserver: MockServer) -> None:\n 
_, out, _ = proc(\"fetch\", mockserver.url(\"/text\"))\n assert out.strip() == \"Works\"\n\n def test_redirect_default(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/redirect\"))\n assert out.strip() == \"Redirected here\"\n\n def test_redirect_disabled(self, mockserver: MockServer) -> None:\n _, _, err = proc(\n \"fetch\", \"--no-redirect\", mockserver.url(\"/redirect-no-meta-refresh\")\n )\n err = err.strip()\n assert \"downloader/response_status_count/302\" in err\n assert \"downloader/response_status_count/200\" not in err\n\n def test_headers(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"), \"--headers\")\n out = out.replace(\"\\r\", \"\") # required on win32\n assert \"Server: TwistedWeb\" in out\n assert \"Content-Type: text/plain\" in out\n", "framework": "pytest", "test_command": "pytest tests/test_command_fetch.py::TestFetchCommand::test_output -xvs"}, {"test_file": "tests/test_command_fetch.py", "test_function": "TestFetchCommand.test_redirect_default", "test_content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestFetchCommand:\n def test_output(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"))\n assert out.strip() == \"Works\"\n\n def test_redirect_default(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/redirect\"))\n assert out.strip() == \"Redirected here\"\n\n def test_redirect_disabled(self, mockserver: MockServer) -> None:\n _, _, err = proc(\n \"fetch\", \"--no-redirect\", mockserver.url(\"/redirect-no-meta-refresh\")\n )\n err = err.strip()\n assert \"downloader/response_status_count/302\" in err\n assert \"downloader/response_status_count/200\" not in err\n\n def test_headers(self, mockserver: MockServer) -> None:\n _, out, _ = 
proc(\"fetch\", mockserver.url(\"/text\"), \"--headers\")\n out = out.replace(\"\\r\", \"\") # required on win32\n assert \"Server: TwistedWeb\" in out\n assert \"Content-Type: text/plain\" in out\n", "framework": "pytest", "test_command": "pytest tests/test_command_fetch.py::TestFetchCommand::test_redirect_default -xvs"}, {"test_file": "tests/test_command_fetch.py", "test_function": "TestFetchCommand.test_redirect_disabled", "test_content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass TestFetchCommand:\n def test_output(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"))\n assert out.strip() == \"Works\"\n\n def test_redirect_default(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/redirect\"))\n assert out.strip() == \"Redirected here\"\n\n def test_redirect_disabled(self, mockserver: MockServer) -> None:\n _, _, err = proc(\n \"fetch\", \"--no-redirect\", mockserver.url(\"/redirect-no-meta-refresh\")\n )\n err = err.strip()\n assert \"downloader/response_status_count/302\" in err\n assert \"downloader/response_status_count/200\" not in err\n\n def test_headers(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"), \"--headers\")\n out = out.replace(\"\\r\", \"\") # required on win32\n assert \"Server: TwistedWeb\" in out\n assert \"Content-Type: text/plain\" in out\n", "framework": "pytest", "test_command": "pytest tests/test_command_fetch.py::TestFetchCommand::test_redirect_disabled -xvs"}, {"test_file": "tests/test_command_fetch.py", "test_function": "TestFetchCommand.test_headers", "test_content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom tests.utils.cmdline import proc\n\nif TYPE_CHECKING:\n from tests.mockserver.http import MockServer\n\n\nclass 
TestFetchCommand:\n def test_output(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"))\n assert out.strip() == \"Works\"\n\n def test_redirect_default(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/redirect\"))\n assert out.strip() == \"Redirected here\"\n\n def test_redirect_disabled(self, mockserver: MockServer) -> None:\n _, _, err = proc(\n \"fetch\", \"--no-redirect\", mockserver.url(\"/redirect-no-meta-refresh\")\n )\n err = err.strip()\n assert \"downloader/response_status_count/302\" in err\n assert \"downloader/response_status_count/200\" not in err\n\n def test_headers(self, mockserver: MockServer) -> None:\n _, out, _ = proc(\"fetch\", mockserver.url(\"/text\"), \"--headers\")\n out = out.replace(\"\\r\", \"\") # required on win32\n assert \"Server: TwistedWeb\" in out\n assert \"Content-Type: text/plain\" in out\n", "framework": "pytest", "test_command": "pytest tests/test_command_fetch.py::TestFetchCommand::test_headers -xvs"}, {"test_file": "tests/test_command_startproject.py", "test_function": "TestStartprojectTemplates.test_startproject_template_override", "test_content": "from __future__ import annotations\n\nimport os\nimport subprocess\nimport sys\nfrom contextlib import contextmanager\nfrom itertools import chain\nfrom pathlib import Path\nfrom shutil import copytree\nfrom stat import S_IWRITE as ANYONE_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands.startproject import IGNORE\nfrom scrapy.utils.test import get_testenv\nfrom tests.utils.cmdline import call, proc\n\n\nclass TestStartprojectCommand:\n project_name = \"testproject\"\n\n @staticmethod\n def _assert_files_exist(project_dir: Path, project_name: str) -> None:\n assert (project_dir / \"scrapy.cfg\").exists()\n assert (project_dir / project_name).exists()\n assert (project_dir / project_name / \"__init__.py\").exists()\n assert (project_dir / project_name / \"items.py\").exists()\n assert 
(project_dir / project_name / \"pipelines.py\").exists()\n assert (project_dir / project_name / \"settings.py\").exists()\n assert (project_dir / project_name / \"spiders\" / \"__init__.py\").exists()\n\n def test_startproject(self, tmp_path: Path) -> None:\n # with no dir argument creates the project in the \"self.project_name\" subdir of cwd\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 0\n self._assert_files_exist(tmp_path / self.project_name, self.project_name)\n\n assert call(\"startproject\", self.project_name, cwd=tmp_path) == 1\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n\n def test_startproject_with_project_dir(self, tmp_path: Path) -> None:\n # with a dir arg creates the project in the specified dir\n project_dir = tmp_path / \"project\"\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 0\n )\n self._assert_files_exist(project_dir, self.project_name)\n\n assert (\n call(\n \"startproject\", self.project_name, str(project_dir) + \"2\", cwd=tmp_path\n )\n == 0\n )\n\n assert (\n call(\"startproject\", self.project_name, str(project_dir), cwd=tmp_path) == 1\n )\n assert (\n call(\n \"startproject\", self.project_name + \"2\", str(project_dir), cwd=tmp_path\n )\n == 1\n )\n assert call(\"startproject\", \"wrong---project---name\") == 1\n assert call(\"startproject\", \"sys\") == 1\n assert call(\"startproject\") == 2\n assert (\n call(\"startproject\", self.project_name, str(project_dir), \"another_params\")\n == 2\n )\n\n def test_existing_project_dir(self, tmp_path: Path) -> None:\n project_name = self.project_name + \"_existing\"\n project_path = tmp_path / project_name\n project_path.mkdir()\n\n assert call(\"startproject\", project_name, cwd=tmp_path) == 0\n self._assert_files_exist(project_path, project_name)\n\n\ndef get_permissions_dict(\n path: str | os.PathLike, renamings=None, ignore=None\n) -> dict[str, str]:\n def 
get_permissions(path: Path) -> str:\n return oct(path.stat().st_mode)\n\n path_obj = Path(path)\n\n renamings = renamings or ()\n permissions_dict = {\n \".\": get_permissions(path_obj),\n }\n for root, dirs, files in os.walk(path_obj):\n nodes = list(chain(dirs, files))\n if ignore:\n ignored_names = ignore(root, nodes)\n nodes = [node for node in nodes if node not in ignored_names]\n for node in nodes:\n absolute_path = Path(root, node)\n relative_path = str(absolute_path.relative_to(path))\n for search_string, replacement in renamings:\n relative_path = relative_path.replace(search_string, replacement)\n permissions = get_permissions(absolute_path)\n permissions_dict[relative_path] = permissions\n return permissions_dict\n\n\nclass TestStartprojectTemplates:\n def test_startproject_template_override(self, tmp_path: Path) -> None:\n tmpl = tmp_path / \"templates\"\n tmpl_proj = tmpl / \"project\"\n project_name = \"testproject\"\n\n copytree(Path(scrapy.__path__[0], \"templates\"), tmpl)\n (tmpl_proj / \"root_template\").write_bytes(b\"\")\n\n args = [\"--set\", f\"TEMPLATES_DIR={tmpl}\"]\n _, out, _ = proc(\"startproject\", project_name, *args, cwd=tmp_path)\n assert f\"New Scrapy project '{project_name}', using template directory\" in out\n assert str(tmpl_proj) in out\n assert (tmp_path / project_name / \"root_template\").exists()\n\n def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has the same permissions as in the project, i.e.\n everything is writable.\"\"\"\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject1\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n process = 
subprocess.Popen(\n (\n sys.executable,\n \"-m\",\n \"scrapy.cmdline\",\n \"startproject\",\n project_name,\n ),\n cwd=destination,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n env=get_testenv(),\n )\n process.wait()\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n template folder has been made read-only, which is something that some\n systems do.\n\n See https://github.com/scrapy/scrapy/pull/4604\n \"\"\"\n scrapy_path = scrapy.__path__[0]\n templates_dir = Path(scrapy_path, \"templates\")\n project_template = Path(templates_dir, \"project\")\n project_name = \"startproject2\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n def _make_read_only(path: Path):\n current_permissions = path.stat().st_mode\n path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)\n\n read_only_templates_dir = tmp_path / \"templates\"\n copytree(templates_dir, read_only_templates_dir)\n\n for root, dirs, files in os.walk(read_only_templates_dir):\n for node in chain(dirs, files):\n _make_read_only(Path(root, node))\n\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert (\n call(\n \"startproject\",\n project_name,\n \"--set\",\n f\"TEMPLATES_DIR={read_only_templates_dir}\",\n cwd=destination,\n )\n == 0\n )\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_unchanged_in_destination(\n self, tmp_path: Path\n ) -> None:\n \"\"\"Check that preexisting folders and files in the destination folder\n do not see their permissions modified.\"\"\"\n 
scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"startproject3\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n destination = tmp_path / \"proj\"\n project_dir = destination / project_name\n project_dir.mkdir(parents=True)\n\n existing_nodes = {\n f\"{permissions:o}{extension}\": permissions\n for extension in (\"\", \".d\")\n for permissions in (\n 0o444,\n 0o555,\n 0o644,\n 0o666,\n 0o755,\n 0o777,\n )\n }\n for node, permissions in existing_nodes.items():\n path = project_dir / node\n if node.endswith(\".d\"):\n path.mkdir(mode=permissions)\n else:\n path.touch(mode=permissions)\n expected_permissions[node] = oct(path.stat().st_mode)\n\n assert call(\"startproject\", project_name, \".\", cwd=project_dir) == 0\n\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == expected_permissions\n\n def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:\n \"\"\"Check that generated files have the right permissions when the\n system uses a umask value that causes new files to have different\n permissions than those from the template folder.\"\"\"\n\n @contextmanager\n def umask(new_mask):\n cur_mask = os.umask(new_mask)\n yield\n os.umask(cur_mask)\n\n scrapy_path = scrapy.__path__[0]\n project_template = Path(scrapy_path, \"templates\", \"project\")\n project_name = \"umaskproject\"\n renamings = (\n (\"module\", project_name),\n (\".tmpl\", \"\"),\n )\n expected_permissions = get_permissions_dict(\n project_template,\n renamings,\n IGNORE,\n )\n\n with umask(0o002):\n destination = tmp_path / \"proj\"\n destination.mkdir()\n assert call(\"startproject\", project_name, cwd=destination) == 0\n\n project_dir = destination / project_name\n actual_permissions = get_permissions_dict(project_dir)\n\n assert actual_permissions == 
expected_permissions\n", "framework": "pytest", "test_command": "pytest tests/test_command_startproject.py::TestStartprojectTemplates::test_startproject_template_override -xvs"}] | {"repo_url": "https://github.com/scrapy/scrapy", "install_cmd": "pip install -e .", "commit_sha": "e02ad08672a5946f659acf4874c4a315e7886346", "frozen_requirements": "frozen_requirements/scrapy_scrapy.txt"} | {"body_lines": 14, "file_lines": 39, "has_docstring": false, "num_tests": 50} | {"status": "partial_pass", "note": "environment-specific test failures"} | repo_patch/0051 | clean | |
repo_patch/0035 | Textualize/rich | rich/_unicode_data/__init__.py | load | load | function | null | from __future__ import annotations
import bisect
import os
import sys
if sys.version_info[:2] >= (3, 9):
from functools import cache
else:
from functools import lru_cache as cache # pragma: no cover
from importlib import import_module
from typing import TYPE_CHECKING, cast
from rich._unicode_data._versions import VERSIONS
if TYPE_CHECKING:
from rich.cells import CellTable
VERSION_ORDER = sorted(
[
tuple(
map(int, version.split(".")),
)
for version in VERSIONS
]
)
VERSION_SET = frozenset(VERSIONS)
def _parse_version(version: str) -> tuple[int, int, int]:
"""Parse a version string into a tuple of 3 integers.
Args:
version: A version string.
Raises:
ValueError: If the version string is invalid.
Returns:
A tuple of 3 integers.
"""
version_integers: tuple[int, ...]
try:
version_integers = tuple(
map(int, version.split(".")),
)
except ValueError:
raise ValueError(
f"unicode version string {version!r} is badly formatted"
) from None
while len(version_integers) < 3:
version_integers = version_integers + (0,)
triple = cast("tuple[int, int, int]", version_integers[:3])
return triple
@cache
def load(unicode_version: str = "auto") -> CellTable:
"""Load a cell table for the given unicode version.
Args:
unicode_version: Unicode version, or `None` to auto-detect.
"""
# TODO: Implement this function | def load(unicode_version: str = "auto") -> CellTable:
"""Load a cell table for the given unicode version.
Args:
unicode_version: Unicode version, or `None` to auto-detect.
""" | Load a cell table for the given unicode version.
Args:
unicode_version: Unicode version, or `None` to auto-detect. | if unicode_version == "auto":
unicode_version = os.environ.get("UNICODE_VERSION", "latest")
try:
_parse_version(unicode_version)
except ValueError:
# The environment variable is invalid
# Fallback to using the latest version seems reasonable
unicode_version = "latest"
if unicode_version == "latest":
version = VERSIONS[-1]
else:
try:
version_numbers = _parse_version(unicode_version)
except ValueError:
version_numbers = _parse_version(VERSIONS[-1])
major, minor, patch = version_numbers
version = f"{major}.{minor}.{patch}"
if version not in VERSION_SET:
insert_position = bisect.bisect_left(VERSION_ORDER, version_numbers)
version = VERSIONS[max(0, insert_position - 1)]
version_path_component = version.replace(".", "-")
module_name = f".unicode{version_path_component}"
module = import_module(module_name, "rich._unicode_data")
if TYPE_CHECKING:
assert isinstance(module.cell_table, CellTable)
return module.cell_table | def load(unicode_version: str = "auto") -> CellTable:
"""Load a cell table for the given unicode version.
Args:
unicode_version: Unicode version, or `None` to auto-detect.
"""
if unicode_version == "auto":
unicode_version = os.environ.get("UNICODE_VERSION", "latest")
try:
_parse_version(unicode_version)
except ValueError:
# The environment variable is invalid
# Fallback to using the latest version seems reasonable
unicode_version = "latest"
if unicode_version == "latest":
version = VERSIONS[-1]
else:
try:
version_numbers = _parse_version(unicode_version)
except ValueError:
version_numbers = _parse_version(VERSIONS[-1])
major, minor, patch = version_numbers
version = f"{major}.{minor}.{patch}"
if version not in VERSION_SET:
insert_position = bisect.bisect_left(VERSION_ORDER, version_numbers)
version = VERSIONS[max(0, insert_position - 1)]
version_path_component = version.replace(".", "-")
module_name = f".unicode{version_path_component}"
module = import_module(module_name, "rich._unicode_data")
if TYPE_CHECKING:
assert isinstance(module.cell_table, CellTable)
return module.cell_table | [{"test_file": "tests/test_unicode_data.py", "test_function": "test_load", "test_content": "from __future__ import annotations\n\nimport pytest\n\nfrom rich._unicode_data import VERSIONS, _parse_version, load\n\n\ndef test_load():\n \"\"\"Test all versions may be loaded.\"\"\"\n for version in VERSIONS:\n load(version)\n\n\n@pytest.mark.parametrize(\n \"version_str,version_tuple\",\n [\n (\"1\", (1, 0, 0)),\n (\"1.0\", (1, 0, 0)),\n (\"1.2\", (1, 2, 0)),\n (\"1.2.3\", (1, 2, 3)),\n ],\n)\ndef test_parse_version(version_str: str, version_tuple: tuple[str, ...]) -> None:\n assert _parse_version(version_str) == version_tuple\n\n\n@pytest.mark.parametrize(\n \"version_in,version_selected\",\n [\n # Lower versions will pick the first (4.1.0)\n (\"0\", \"4.1.0\"),\n (\"1\", \"4.1.0\"),\n (\"1.0\", \"4.1.0\"),\n (\"1.0.0\", \"4.1.0\"),\n (\"4.0.0\", \"4.1.0\"),\n (\"4.0.2\", \"4.1.0\"),\n (\"4.1.0\", \"4.1.0\"),\n (\"4.1.1\", \"4.1.0\"),\n (\"4.2.1\", \"4.1.0\"),\n # Nearest version lower\n (\"5\", \"5.0.0\"),\n (\"5.0\", \"5.0.0\"),\n (\"5.0.0\", \"5.0.0\"),\n (\"5.0.1\", \"5.0.0\"),\n (\"5.1.0\", \"5.1.0\"),\n (\"5.1.1\", \"5.1.0\"),\n # Maximum version if greater than the maximum\n (\"17.0.0\", \"17.0.0\"),\n (\"17.0.1\", \"17.0.0\"),\n (\"17.1.0\", \"17.0.0\"),\n # Greater than the maximum\n (\"18.0.0\", \"17.0.0\"),\n ],\n)\ndef test_load_version(version_in: str, version_selected: str) -> None:\n \"\"\"Test that load will pick the nearest lower version if it exists, or the lowest version if below the first available version.\"\"\"\n assert load(version_in).unicode_version == version_selected\n\n\ndef test_load_version_invalid() -> None:\n \"\"\"Check that invalid versions load the latest unicode data.\"\"\"\n assert load(\"foo\").unicode_version == \"17.0.0\"\n assert load(\"a.b.c\").unicode_version == \"17.0.0\"\n assert load(\"1.2.3a\").unicode_version == \"17.0.0\"\n", "framework": "pytest", "test_command": "pytest 
tests/test_unicode_data.py::test_load -xvs"}, {"test_file": "tests/test_unicode_data.py", "test_function": "test_load_version", "test_content": "from __future__ import annotations\n\nimport pytest\n\nfrom rich._unicode_data import VERSIONS, _parse_version, load\n\n\ndef test_load():\n \"\"\"Test all versions may be loaded.\"\"\"\n for version in VERSIONS:\n load(version)\n\n\n@pytest.mark.parametrize(\n \"version_str,version_tuple\",\n [\n (\"1\", (1, 0, 0)),\n (\"1.0\", (1, 0, 0)),\n (\"1.2\", (1, 2, 0)),\n (\"1.2.3\", (1, 2, 3)),\n ],\n)\ndef test_parse_version(version_str: str, version_tuple: tuple[str, ...]) -> None:\n assert _parse_version(version_str) == version_tuple\n\n\n@pytest.mark.parametrize(\n \"version_in,version_selected\",\n [\n # Lower versions will pick the first (4.1.0)\n (\"0\", \"4.1.0\"),\n (\"1\", \"4.1.0\"),\n (\"1.0\", \"4.1.0\"),\n (\"1.0.0\", \"4.1.0\"),\n (\"4.0.0\", \"4.1.0\"),\n (\"4.0.2\", \"4.1.0\"),\n (\"4.1.0\", \"4.1.0\"),\n (\"4.1.1\", \"4.1.0\"),\n (\"4.2.1\", \"4.1.0\"),\n # Nearest version lower\n (\"5\", \"5.0.0\"),\n (\"5.0\", \"5.0.0\"),\n (\"5.0.0\", \"5.0.0\"),\n (\"5.0.1\", \"5.0.0\"),\n (\"5.1.0\", \"5.1.0\"),\n (\"5.1.1\", \"5.1.0\"),\n # Maximum version if greater than the maximum\n (\"17.0.0\", \"17.0.0\"),\n (\"17.0.1\", \"17.0.0\"),\n (\"17.1.0\", \"17.0.0\"),\n # Greater than the maximum\n (\"18.0.0\", \"17.0.0\"),\n ],\n)\ndef test_load_version(version_in: str, version_selected: str) -> None:\n \"\"\"Test that load will pick the nearest lower version if it exists, or the lowest version if below the first available version.\"\"\"\n assert load(version_in).unicode_version == version_selected\n\n\ndef test_load_version_invalid() -> None:\n \"\"\"Check that invalid versions load the latest unicode data.\"\"\"\n assert load(\"foo\").unicode_version == \"17.0.0\"\n assert load(\"a.b.c\").unicode_version == \"17.0.0\"\n assert load(\"1.2.3a\").unicode_version == \"17.0.0\"\n", "framework": "pytest", "test_command": 
"pytest tests/test_unicode_data.py::test_load_version -xvs"}, {"test_file": "tests/test_unicode_data.py", "test_function": "test_load_version_invalid", "test_content": "from __future__ import annotations\n\nimport pytest\n\nfrom rich._unicode_data import VERSIONS, _parse_version, load\n\n\ndef test_load():\n \"\"\"Test all versions may be loaded.\"\"\"\n for version in VERSIONS:\n load(version)\n\n\n@pytest.mark.parametrize(\n \"version_str,version_tuple\",\n [\n (\"1\", (1, 0, 0)),\n (\"1.0\", (1, 0, 0)),\n (\"1.2\", (1, 2, 0)),\n (\"1.2.3\", (1, 2, 3)),\n ],\n)\ndef test_parse_version(version_str: str, version_tuple: tuple[str, ...]) -> None:\n assert _parse_version(version_str) == version_tuple\n\n\n@pytest.mark.parametrize(\n \"version_in,version_selected\",\n [\n # Lower versions will pick the first (4.1.0)\n (\"0\", \"4.1.0\"),\n (\"1\", \"4.1.0\"),\n (\"1.0\", \"4.1.0\"),\n (\"1.0.0\", \"4.1.0\"),\n (\"4.0.0\", \"4.1.0\"),\n (\"4.0.2\", \"4.1.0\"),\n (\"4.1.0\", \"4.1.0\"),\n (\"4.1.1\", \"4.1.0\"),\n (\"4.2.1\", \"4.1.0\"),\n # Nearest version lower\n (\"5\", \"5.0.0\"),\n (\"5.0\", \"5.0.0\"),\n (\"5.0.0\", \"5.0.0\"),\n (\"5.0.1\", \"5.0.0\"),\n (\"5.1.0\", \"5.1.0\"),\n (\"5.1.1\", \"5.1.0\"),\n # Maximum version if greater than the maximum\n (\"17.0.0\", \"17.0.0\"),\n (\"17.0.1\", \"17.0.0\"),\n (\"17.1.0\", \"17.0.0\"),\n # Greater than the maximum\n (\"18.0.0\", \"17.0.0\"),\n ],\n)\ndef test_load_version(version_in: str, version_selected: str) -> None:\n \"\"\"Test that load will pick the nearest lower version if it exists, or the lowest version if below the first available version.\"\"\"\n assert load(version_in).unicode_version == version_selected\n\n\ndef test_load_version_invalid() -> None:\n \"\"\"Check that invalid versions load the latest unicode data.\"\"\"\n assert load(\"foo\").unicode_version == \"17.0.0\"\n assert load(\"a.b.c\").unicode_version == \"17.0.0\"\n assert load(\"1.2.3a\").unicode_version == \"17.0.0\"\n", "framework": 
"pytest", "test_command": "pytest tests/test_unicode_data.py::test_load_version_invalid -xvs"}] | {"repo_url": "https://github.com/Textualize/rich", "install_cmd": "pip install -e .", "commit_sha": "fc41075a3206d2a5fd846c6f41c4d2becab814fa", "frozen_requirements": "frozen_requirements/Textualize_rich.txt"} | {"body_lines": 26, "file_lines": 94, "has_docstring": true, "num_tests": 3} | {"status": "validated", "tests_run": "docker"} | repo_patch/0053 | clean |
repo_patch/0036 | deepspeedai/DeepSpeed | deepspeed/runtime/precision_config.py | get_bfloat16_config | get_bfloat16_config | function | null | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from .fp16.loss_scaler import (
INITIAL_LOSS_SCALE,
SCALE_WINDOW,
DELAYED_SHIFT,
CONSECUTIVE_HYSTERESIS,
MIN_LOSS_SCALE,
)
#########################################
# BFLOAT16 support
#########################################
# BFLOAT16 feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
BFLOAT16_FORMAT = '''
BFLOAT16 parameters should be of the format:
"bf16": {
"enabled": true,
"immediate_grad_update": false,
"check_grad_overflow": false
}
'''
BFLOAT16 = "bf16"
BFLOAT16_OLD = "bfloat16" # keeping for backwards compatibility
def get_bfloat16_config(param_dict):
# TODO: Implement this function
class DeepSpeedBF16Config(DeepSpeedConfigModel):
"""
For bfloat16 configuration
"""
enabled: bool = False
"""
Enable bfloat16 mixed-precision training/inference
"""
immediate_grad_update: bool = False
"""
Apply gradient updates immediately rather than delayed.
"""
check_grad_overflow: bool = False
"""
Check for gradient overflows and underflows
"""
bf16_master_weights_and_grads: bool = False
"""
Maintain master weights/gradients in bf16 precision for ZeRO optimizer.
"""
bf16_optimizer_states: bool = False
"""
Keep optimizer states in bf16 (only valid when bf16_master_weights_and_grads is enabled).
"""
#########################################
# FP16 support
#########################################
# FP16 feature. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
FP16_FORMAT = '''
FP16 parameters should be of the format:
"fp16": {
"enabled": true,
"auto_cast": false,
"loss_scale": 0,
"initial_scale_power": 16,
"loss_scale_window": 1000,
"hysteresis": 2,
"consecutive_hysteresis": false,
"min_loss_scale": 1
}
'''
FP16 = "fp16"
def get_float16_config(param_dict):
fp16_config_dict = param_dict.get(FP16, {})
return DeepSpeedFP16Config(**fp16_config_dict)
class DeepSpeedFP16Config(DeepSpeedConfigModel):
"""
For float16 configuration
"""
enabled: bool = False
"""
Enable fp16 mixed-precision training/inference
"""
auto_cast: bool = False
"""
Automatically cast inputs to fp16
"""
loss_scale: float = 0
"""
Loss scaling value. Default value of 0 means dynamic loss scaling instead of static loss scale.
"""
initial_scale_power: int = 16
"""
For dynamic loss scaling, set initial loss scale to 2^{initial_scale_power}.
"""
loss_scale_window: int = 1000
"""
Iteration intervals for raising/lowering dynamic loss scale value.
"""
hysteresis: int = 2
"""
Delay shift in dynamic loss scaling.
"""
consecutive_hysteresis: bool = False
"""
Refill hysteresis if iteration does not overflow/underflow.
"""
min_loss_scale: int = 1
"""
Minimum dynamic loss scale value.
"""
fp16_master_weights_and_grads: bool = False
"""
Maintain master weights in optimizer state as fp16 instead of fp32 (valid with DeepSpeedCPUAdam only).
"""
def initial_dynamic_scale(self):
return 2**self.initial_scale_power
def dynamic_loss_scale_args(self):
return {
INITIAL_LOSS_SCALE: 2**self.initial_scale_power,
SCALE_WINDOW: self.loss_scale_window,
DELAYED_SHIFT: self.hysteresis,
CONSECUTIVE_HYSTERESIS: self.consecutive_hysteresis,
MIN_LOSS_SCALE: self.min_loss_scale,
} | def get_bfloat16_config(param_dict): | bf16_config_dict = param_dict.get(BFLOAT16, None)
if bf16_config_dict is None:
bf16_config_dict = param_dict.get(BFLOAT16_OLD, {})
return DeepSpeedBF16Config(**bf16_config_dict) | def get_bfloat16_config(param_dict):
bf16_config_dict = param_dict.get(BFLOAT16, None)
if bf16_config_dict is None:
bf16_config_dict = param_dict.get(BFLOAT16_OLD, {})
return DeepSpeedBF16Config(**bf16_config_dict) | [{"test_file": "tests/unit/runtime/test_ds_config_dict.py", "test_function": "test_get_bfloat16_enabled", "test_content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\n# A test on its own\nimport os\nimport pytest\nimport json\nimport hjson\nimport argparse\nimport torch\n\nfrom deepspeed.runtime.zero.config import DeepSpeedZeroConfig\nfrom deepspeed.accelerator import get_accelerator\n\nfrom unit.common import DistributedTest, get_test_path\nfrom unit.simple_model import SimpleModel, create_config_from_dict, random_dataloader\nimport deepspeed.comm as dist\n\n# A test on its own\nimport deepspeed\nfrom deepspeed.runtime.config import DeepSpeedConfig\nfrom deepspeed.runtime.precision_config import get_bfloat16_config\n\n\nclass TestBasicConfig(DistributedTest):\n world_size = 1\n\n def test_accelerator(self):\n assert (get_accelerator().is_available())\n\n def test_check_version(self):\n assert hasattr(deepspeed, \"__git_hash__\")\n assert hasattr(deepspeed, \"__git_branch__\")\n assert hasattr(deepspeed, \"__version__\")\n assert hasattr(deepspeed, \"__version_major__\")\n assert hasattr(deepspeed, \"__version_minor__\")\n assert hasattr(deepspeed, \"__version_patch__\")\n\n\n@pytest.fixture\ndef base_config():\n config_dict = {\n \"train_batch_size\": 1,\n \"optimizer\": {\n \"type\": \"Adam\",\n \"params\": {\n \"lr\": 0.00015\n }\n },\n }\n return config_dict\n\n\ndef _run_batch_config(ds_config, train_batch=None, micro_batch=None, gas=None):\n ds_config.train_batch_size = train_batch\n ds_config.train_micro_batch_size_per_gpu = micro_batch\n ds_config.gradient_accumulation_steps = gas\n success = True\n try:\n ds_config._configure_train_batch_size()\n except AssertionError:\n success = False\n return success\n\n\ndef _batch_assert(status, ds_config, batch, micro_batch, gas, success):\n\n if not success:\n assert not status\n return\n\n assert 
ds_config.train_batch_size == batch\n assert ds_config.train_micro_batch_size_per_gpu == micro_batch\n assert ds_config.gradient_accumulation_steps == gas\n\n\n#Tests different batch config provided in deepspeed json file\n@pytest.mark.parametrize('num_ranks,batch,micro_batch,gas,success',\n [(2,32,16,1,True),\n (2,32,8,2,True),\n (2,33,17,2,False),\n (2,32,18,1,False)]) # yapf: disable\nclass TestBatchConfig(DistributedTest):\n world_size = 2\n\n def test(self, num_ranks, batch, micro_batch, gas, success):\n assert dist.get_world_size() == num_ranks, \\\n f'The test assumes a world size of {num_ranks}'\n\n ds_batch_config = get_test_path('ds_batch_config.json')\n ds_config = DeepSpeedConfig(ds_batch_config)\n\n #test cases when all parameters are provided\n status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch, gas=gas)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n #test cases when two out of three parameters are provided\n status = _run_batch_config(ds_config, train_batch=batch, micro_batch=micro_batch)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n if success:\n #when gas is provided with one more parameter\n status = _run_batch_config(ds_config, train_batch=batch, gas=gas)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n status = _run_batch_config(ds_config, micro_batch=micro_batch, gas=gas)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n #test the case when only micro_batch or train_batch is provided\n if gas == 1:\n status = _run_batch_config(ds_config, micro_batch=micro_batch)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n status = _run_batch_config(ds_config, train_batch=batch)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n else:\n #when only gas is provided\n status = _run_batch_config(ds_config, gas=gas)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n 
#when gas is provided with something else and gas does not divide batch\n if gas != 1:\n status = _run_batch_config(ds_config, train_batch=batch, gas=gas)\n _batch_assert(status, ds_config, batch, micro_batch, gas, success)\n\n\ndef test_temp_config_json(tmpdir):\n config_dict = {\n \"train_batch_size\": 1,\n }\n config_path = create_config_from_dict(tmpdir, config_dict)\n config_json = json.load(open(config_path, 'r'))\n assert 'train_batch_size' in config_json\n\n\n@pytest.mark.parametrize(\"gather_weights_key\",\n [\"stage3_gather_16bit_weights_on_model_save\", \"stage3_gather_fp16_weights_on_model_save\"])\ndef test_gather_16bit_params_on_model_save(gather_weights_key):\n config_dict = {\n gather_weights_key: True,\n }\n config = DeepSpeedZeroConfig(**config_dict)\n\n assert config.gather_16bit_weights_on_model_save == True\n\n\n@pytest.mark.parametrize(\"bf16_key\", [\"bf16\", \"bfloat16\"])\ndef test_get_bfloat16_enabled(bf16_key):\n cfg = {\n bf16_key: {\n \"enabled\": True,\n },\n }\n assert get_bfloat16_config(cfg).enabled == True\n\n\nclass TestConfigLoad(DistributedTest):\n world_size = 1\n\n def test_dict(self, base_config):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n hidden_dim = 10\n model = SimpleModel(hidden_dim)\n model, _, _, _ = deepspeed.initialize(config=base_config, model=model, model_parameters=model.parameters())\n\n def test_json(self, base_config, tmpdir):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n config_path = os.path.join(tmpdir, \"config.json\")\n with open(config_path, 'w') as fp:\n json.dump(base_config, fp)\n hidden_dim = 10\n model = SimpleModel(hidden_dim)\n model, _, _, _ = deepspeed.initialize(config=config_path, model=model, 
model_parameters=model.parameters())\n\n def test_hjson(self, base_config, tmpdir):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n config_path = os.path.join(tmpdir, \"config.json\")\n with open(config_path, 'w') as fp:\n hjson.dump(base_config, fp)\n hidden_dim = 10\n model = SimpleModel(hidden_dim)\n model, _, _, _ = deepspeed.initialize(config=config_path, model=model, model_parameters=model.parameters())\n\n\nclass TestDeprecatedDeepScaleConfig(DistributedTest):\n world_size = 1\n\n def test(self, base_config, tmpdir):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n config_path = create_config_from_dict(tmpdir, base_config)\n parser = argparse.ArgumentParser()\n args = parser.parse_args(args='')\n args.deepscale_config = config_path\n args.local_rank = 0\n\n hidden_dim = 10\n\n model = SimpleModel(hidden_dim)\n model, _, _, _ = deepspeed.initialize(args=args, model=model, model_parameters=model.parameters())\n data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, device=model.device)\n for n, batch in enumerate(data_loader):\n loss = model(batch[0], batch[1])\n model.backward(loss)\n model.step()\n\n\nclass TestDistInit(DistributedTest):\n world_size = 1\n\n def test(self, base_config):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n hidden_dim = 10\n\n model = SimpleModel(hidden_dim)\n model, _, _, _ = deepspeed.initialize(config=base_config,\n model=model,\n model_parameters=model.parameters(),\n dist_init_required=True)\n data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=hidden_dim, 
device=model.device)\n for n, batch in enumerate(data_loader):\n loss = model(batch[0], batch[1])\n model.backward(loss)\n model.step()\n\n\nclass TestInitNoOptimizer(DistributedTest):\n world_size = 1\n\n def test(self, base_config):\n if get_accelerator().device_name() == \"cpu\":\n pytest.skip(\"This test timesout with CPU accelerator\")\n\n # XXX: the bf16 path w/ no optimizer needs to be fixed\n # if get_accelerator().is_bf16_supported():\n # base_config[\"bf16\"] = {\"enabled\": True}\n dtype = torch.float\n if get_accelerator().is_fp16_supported():\n dtype = torch.float16\n base_config[\"fp16\"] = {\"enabled\": True}\n\n del base_config[\"optimizer\"]\n hidden_dim = 10\n\n model = SimpleModel(hidden_dim=hidden_dim)\n model, _, _, _ = deepspeed.initialize(config=base_config, model=model)\n data_loader = random_dataloader(model=model,\n total_samples=5,\n hidden_dim=hidden_dim,\n device=model.device,\n dtype=dtype)\n for n, batch in enumerate(data_loader):\n loss = model(batch[0], batch[1])\n with pytest.raises(AssertionError):\n model.backward(loss)\n with pytest.raises(AssertionError):\n model.step()\n\n\nclass TestArgs(DistributedTest):\n world_size = 1\n\n def test_none_args(self, base_config):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n model = SimpleModel(hidden_dim=10)\n model, _, _, _ = deepspeed.initialize(args=None, model=model, config=base_config)\n data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device)\n for n, batch in enumerate(data_loader):\n loss = model(batch[0], batch[1])\n\n def test_no_args(self, base_config):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n model = SimpleModel(hidden_dim=10)\n model, _, _, _ = 
deepspeed.initialize(model=model, config=base_config)\n data_loader = random_dataloader(model=model, total_samples=5, hidden_dim=10, device=model.device)\n for n, batch in enumerate(data_loader):\n loss = model(batch[0], batch[1])\n\n\nclass TestNoModel(DistributedTest):\n world_size = 1\n\n def test(self, base_config):\n if get_accelerator().is_bf16_supported():\n base_config[\"bf16\"] = {\"enabled\": True}\n elif get_accelerator().is_fp16_supported():\n base_config[\"fp16\"] = {\"enabled\": True}\n model = SimpleModel(hidden_dim=10)\n with pytest.raises(AssertionError):\n model, _, _, _ = deepspeed.initialize(model=None, config=base_config)\n\n with pytest.raises(AssertionError):\n model, _, _, _ = deepspeed.initialize(model, config=base_config)\n", "framework": "pytest", "test_command": "pytest tests/unit/runtime/test_ds_config_dict.py::test_get_bfloat16_enabled -xvs"}] | {"repo_url": "https://github.com/deepspeedai/DeepSpeed", "install_cmd": "pip install -e .", "commit_sha": "a41a96b19f2b5e75567c85ff9155e4bb09c8e539", "frozen_requirements": "frozen_requirements/deepspeedai_DeepSpeed.txt"} | {"body_lines": 4, "file_lines": 157, "has_docstring": false, "num_tests": 1} | {"status": "validated", "tests_run": "docker"} | repo_patch/0054 | clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.