task_id stringlengths 15 15 | repo stringlengths 11 23 | file_path stringlengths 16 49 | function_name stringlengths 4 33 | qualified_name stringlengths 4 37 | function_type stringclasses 2 values | class_name stringclasses 8 values | prompt stringlengths 422 16.4k | signature stringlengths 11 792 | docstring stringlengths 0 549 | canonical_solution stringlengths 106 2.37k | full_function stringlengths 129 2.67k | tests stringlengths 563 4.68M | setup stringlengths 201 225 | metadata stringlengths 74 78 | validation stringlengths 36 72 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
repo_patch/0001 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | normalize_output_item | normalize_output_item | function | null | """
Job utilities for the /api/jobs endpoint.
Provides normalization and helper functions for job status tracking.
"""
from typing import Optional
from comfy_api.internal import prune_dict
class JobStatus:
    """Job status constants for the /api/jobs endpoint."""
    PENDING = 'pending'          # waiting in the queue
    IN_PROGRESS = 'in_progress'  # currently executing
    COMPLETED = 'completed'      # finished; history reports success (or no error)
    FAILED = 'failed'            # history reports an execution error
    CANCELLED = 'cancelled'      # execution was interrupted
    # All recognized statuses, used as the default status filter.
    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]
# Media types that can be previewed in the frontend
PREVIEWABLE_MEDIA_TYPES = frozenset({'images', 'video', 'audio', '3d', 'text'})

# 3D file extensions for preview fallback (no dedicated media_type exists)
THREE_D_EXTENSIONS = frozenset({'.obj', '.fbx', '.gltf', '.glb', '.usdz'})


def has_3d_extension(filename: str) -> bool:
    """Return True if *filename* ends with a known 3D model extension (case-insensitive)."""
    lower = filename.lower()
    return any(lower.endswith(ext) for ext in THREE_D_EXTENSIONS)


def normalize_output_item(item):
    """Normalize a single output list item for the jobs API.

    Returns the normalized item, or None to exclude it.
    String items with 3D extensions become {filename, type, subfolder} dicts.
    """
    if item is None:
        # Placeholder entries in output lists carry no data.
        return None
    if isinstance(item, str):
        # Some 3D preview nodes emit bare filename strings; synthesize a
        # file-output dict so the frontend can preview them.
        if has_3d_extension(item):
            return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
        # Other strings (e.g. text/JSON payloads) are not file outputs.
        return None
    if isinstance(item, dict):
        # Dict items are already in the expected file-output shape.
        return item
    # Any other shape (tuple, number, ...) is excluded.
    return None
def normalize_outputs(outputs: dict) -> dict:
    """Normalize raw node outputs for the jobs API.

    Transforms string 3D filenames into file output dicts and removes
    None items. All other items (non-3D strings, dicts, etc.) are
    preserved as-is.
    """
    result = {}
    for node_id, node_outputs in outputs.items():
        # Anything that is not a per-media-type dict is passed through untouched.
        if not isinstance(node_outputs, dict):
            result[node_id] = node_outputs
            continue
        cleaned = {}
        for media_type, items in node_outputs.items():
            # 'animated' is a boolean flag, and non-list values hold no items.
            if media_type == 'animated' or not isinstance(items, list):
                cleaned[media_type] = items
                continue
            kept = []
            for entry in items:
                if entry is None:
                    continue  # drop placeholder entries
                normalized = normalize_output_item(entry)
                # Keep the original entry when normalization excludes it.
                kept.append(entry if normalized is None else normalized)
            cleaned[media_type] = kept
        result[node_id] = cleaned
    return result
# Text preview truncation limit (1024 characters) to prevent preview_output bloat
TEXT_PREVIEW_MAX_LENGTH = 1024
def _create_text_preview(value: str) -> dict:
"""Create a text preview dict with optional truncation.
Returns:
dict with 'content' and optionally 'truncated' flag
"""
if len(value) <= TEXT_PREVIEW_MAX_LENGTH:
return {'content': value}
return {
'content': value[:TEXT_PREVIEW_MAX_LENGTH],
'truncated': True
}
def _extract_job_metadata(extra_data: dict) -> tuple[Optional[int], Optional[str]]:
"""Extract create_time and workflow_id from extra_data.
Returns:
tuple: (create_time, workflow_id)
"""
create_time = extra_data.get('create_time')
extra_pnginfo = extra_data.get('extra_pnginfo', {})
workflow_id = extra_pnginfo.get('workflow', {}).get('id')
return create_time, workflow_id
def is_previewable(media_type: str, item: dict) -> bool:
    """
    Check if an output item is previewable.

    Matches frontend logic in ComfyUI_frontend/src/stores/queueStore.ts
    Maintains backwards compatibility with existing logic.

    Priority:
    1. media_type is 'images', 'video', 'audio', or '3d'
    2. format field starts with 'video/' or 'audio/'
    3. filename has a 3D extension (.obj, .fbx, .gltf, .glb, .usdz)
    """
    # Fast path: the media type itself is known to be previewable.
    if media_type in PREVIEWABLE_MEDIA_TYPES:
        return True
    # MIME-style 'format' field; kept for backwards compatibility with how
    # custom node outputs are handled in the frontend.
    fmt = item.get('format', '')
    if fmt.startswith(('video/', 'audio/')):
        return True
    # Last resort: recognize 3D model files by their filename extension.
    name = item.get('filename', '').lower()
    return any(name.endswith(ext) for ext in THREE_D_EXTENSIONS)
def normalize_queue_item(item: tuple, status: str) -> dict:
    """Convert queue item tuple to unified job dict.

    Expects item with sensitive data already removed (5 elements).
    """
    # Tuple layout: (priority, prompt_id, prompt, extra_data, outputs_to_execute)
    priority, prompt_id = item[0], item[1]
    extra_data = item[3]
    create_time, workflow_id = _extract_job_metadata(extra_data)
    # prune_dict drops None-valued keys from the response.
    return prune_dict({
        'id': prompt_id,
        'status': status,
        'priority': priority,
        'create_time': create_time,
        'outputs_count': 0,
        'workflow_id': workflow_id,
    })
def normalize_history_item(prompt_id: str, history_item: dict, include_outputs: bool = False) -> dict:
    """Convert history item dict to unified job dict.

    History items have sensitive data already removed (prompt tuple has 5 elements).

    Args:
        prompt_id: ID of the prompt this history entry belongs to.
        history_item: Raw history entry with 'prompt', 'status' and 'outputs'.
        include_outputs: When True, attach full normalized outputs, raw
            execution status and the workflow definition to the job dict.
    """
    prompt_tuple = history_item['prompt']
    priority, _, prompt, extra_data, _ = prompt_tuple
    create_time, workflow_id = _extract_job_metadata(extra_data)
    status_info = history_item.get('status', {})
    status_str = status_info.get('status_str') if status_info else None
    outputs = history_item.get('outputs', {})
    outputs_count, preview_output = get_outputs_summary(outputs)
    # Walk the recorded execution messages to recover timing, the error
    # payload (if any) and whether the run was interrupted.
    execution_error = None
    execution_start_time = None
    execution_end_time = None
    was_interrupted = False
    if status_info:
        messages = status_info.get('messages', [])
        for entry in messages:
            # Each message is a (event_name, event_data) pair.
            if isinstance(entry, (list, tuple)) and len(entry) >= 2:
                event_name, event_data = entry[0], entry[1]
                if isinstance(event_data, dict):
                    if event_name == 'execution_start':
                        execution_start_time = event_data.get('timestamp')
                    elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'):
                        # Any terminal event marks the end of execution.
                        execution_end_time = event_data.get('timestamp')
                        if event_name == 'execution_error':
                            execution_error = event_data
                        elif event_name == 'execution_interrupted':
                            was_interrupted = True
    # Map the raw history status onto JobStatus: an interrupted error run is
    # reported as cancelled; anything without an error status (including an
    # unrecognized status_str) is treated as completed.
    if status_str == 'success':
        status = JobStatus.COMPLETED
    elif status_str == 'error':
        status = JobStatus.CANCELLED if was_interrupted else JobStatus.FAILED
    else:
        status = JobStatus.COMPLETED
    # prune_dict drops None-valued keys from the response.
    job = prune_dict({
        'id': prompt_id,
        'status': status,
        'priority': priority,
        'create_time': create_time,
        'execution_start_time': execution_start_time,
        'execution_end_time': execution_end_time,
        'execution_error': execution_error,
        'outputs_count': outputs_count,
        'preview_output': preview_output,
        'workflow_id': workflow_id,
    })
    if include_outputs:
        job['outputs'] = normalize_outputs(outputs)
        job['execution_status'] = status_info
        job['workflow'] = {
            'prompt': prompt,
            'extra_data': extra_data,
        }
    return job
def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:
    """
    Count outputs and find preview in a single pass.

    Returns (outputs_count, preview_output).

    Preview priority (matching frontend):
    1. type="output" with previewable media
    2. Any previewable media
    """
    count = 0
    preview_output = None      # best candidate: previewable with type='output'
    fallback_preview = None    # first previewable item of any type
    for node_id, node_outputs in outputs.items():
        if not isinstance(node_outputs, dict):
            continue
        for media_type, items in node_outputs.items():
            # 'animated' is a boolean flag, not actual output items
            if media_type == 'animated' or not isinstance(items, list):
                continue
            for item in items:
                if not isinstance(item, dict):
                    # Handle text outputs (non-dict items like strings or tuples)
                    normalized = normalize_output_item(item)
                    if normalized is None:
                        # Not a 3D file string — check for text preview
                        if media_type == 'text':
                            count += 1
                            if preview_output is None:
                                # Tuples carry the text in their first element.
                                if isinstance(item, tuple):
                                    text_value = item[0] if item else ''
                                else:
                                    text_value = str(item)
                                text_preview = _create_text_preview(text_value)
                                enriched = {
                                    **text_preview,
                                    'nodeId': node_id,
                                    'mediaType': media_type
                                }
                                # Text is never promoted to the primary preview slot.
                                if fallback_preview is None:
                                    fallback_preview = enriched
                        # Excluded items (None, non-3D strings, ...) are not counted.
                        continue
                    # normalize_output_item returned a dict (e.g. 3D file)
                    item = normalized
                count += 1
                # Once a type='output' preview is found, later items only count.
                if preview_output is not None:
                    continue
                if is_previewable(media_type, item):
                    enriched = {
                        **item,
                        'nodeId': node_id,
                    }
                    # Preserve an explicit mediaType if the item already has one.
                    if 'mediaType' not in item:
                        enriched['mediaType'] = media_type
                    if item.get('type') == 'output':
                        preview_output = enriched
                    elif fallback_preview is None:
                        fallback_preview = enriched
    return count, preview_output or fallback_preview
def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:
    """Sort jobs list by specified field and order."""
    descending = sort_order == 'desc'
    if sort_by == 'execution_duration':
        def sort_key(job):
            begin = job.get('execution_start_time', 0)
            finish = job.get('execution_end_time', 0)
            # Missing or None timestamps sort as zero duration.
            if begin and finish:
                return finish - begin
            return 0
    else:
        # Default: order by creation time.
        def sort_key(job):
            return job.get('create_time', 0)
    return sorted(jobs, key=sort_key, reverse=descending)
def get_job(prompt_id: str, running: list, queued: list, history: dict) -> Optional[dict]:
    """
    Get a single job by prompt_id from history or queue.

    Args:
        prompt_id: The prompt ID to look up
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id

    Returns:
        Job dict with full details, or None if not found
    """
    # History entries take precedence and carry full output details.
    if prompt_id in history:
        return normalize_history_item(prompt_id, history[prompt_id], include_outputs=True)
    # Scan running items first, then pending ones; item[1] is the prompt_id.
    for queue_items, status in ((running, JobStatus.IN_PROGRESS), (queued, JobStatus.PENDING)):
        for item in queue_items:
            if item[1] == prompt_id:
                return normalize_queue_item(item, status)
    return None
def get_all_jobs(
    running: list,
    queued: list,
    history: dict,
    status_filter: Optional[list[str]] = None,
    workflow_id: Optional[str] = None,
    sort_by: str = "created_at",
    sort_order: str = "desc",
    limit: Optional[int] = None,
    offset: int = 0
) -> tuple[list[dict], int]:
    """
    Get all jobs (running, pending, completed) with filtering and sorting.

    Args:
        running: List of currently running queue items
        queued: List of pending queue items
        history: Dict of history items keyed by prompt_id
        status_filter: List of statuses to include (from JobStatus.ALL)
        workflow_id: Filter by workflow ID
        sort_by: Field to sort by ('created_at', 'execution_duration')
        sort_order: 'asc' or 'desc'
        limit: Maximum number of items to return
        offset: Number of items to skip

    Returns:
        tuple: (jobs_list, total_count)
    """
    # No filter means every status is included.
    if status_filter is None:
        status_filter = JobStatus.ALL
    jobs = []
    if JobStatus.IN_PROGRESS in status_filter:
        jobs.extend(normalize_queue_item(entry, JobStatus.IN_PROGRESS) for entry in running)
    if JobStatus.PENDING in status_filter:
        jobs.extend(normalize_queue_item(entry, JobStatus.PENDING) for entry in queued)
    # History items can resolve to completed, failed or cancelled; normalize
    # each one and keep only those whose resolved status was requested.
    requested_history_statuses = {JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED} & set(status_filter)
    if requested_history_statuses:
        for prompt_id, history_item in history.items():
            job = normalize_history_item(prompt_id, history_item)
            if job.get('status') in requested_history_statuses:
                jobs.append(job)
    if workflow_id:
        jobs = [job for job in jobs if job.get('workflow_id') == workflow_id]
    jobs = apply_sorting(jobs, sort_by, sort_order)
    # total_count reflects the filtered set before pagination.
    total_count = len(jobs)
    if offset > 0:
        jobs = jobs[offset:]
    if limit is not None:
        jobs = jobs[:limit]
    return (jobs, total_count)
"""Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts.
""" | Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts. | if item is None:
return None
if isinstance(item, str):
if has_3d_extension(item):
return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
return None
if isinstance(item, dict):
return item
return None | def normalize_output_item(item):
"""Normalize a single output list item for the jobs API.
Returns the normalized item, or None to exclude it.
String items with 3D extensions become {filename, type, subfolder} dicts.
"""
if item is None:
return None
if isinstance(item, str):
if has_3d_extension(item):
return {'filename': item, 'type': 'output', 'subfolder': '', 'mediaType': '3d'}
return None
if isinstance(item, dict):
return item
return None | [{"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_none_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def 
test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with 
type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 
'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n 
def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 
'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n 
assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have 
execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 
'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = 
normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_none_returns_none -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_string_3d_extension_synthesizes_dict", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should 
be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 
'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, 
f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n 
count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 
'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 
'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n 
),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 
1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 
'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_string_3d_extension_synthesizes_dict -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_string_non_3d_extension_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert 
JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is 
False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable 
media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node 
output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 
'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 
10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job 
= normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': 
{}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') 
is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_string_non_3d_extension_returns_none -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": 
"TestNormalizeOutputItem.test_string_no_extension_returns_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case 
insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': 
[\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview 
= get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n 
assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 
'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed 
history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def 
test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = 
normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def 
test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_string_no_extension_returns_none -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_dict_passes_through", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 
'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = 
get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def 
test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n 
assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == ['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 
'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 
'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 
'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': {}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = 
job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': 
[{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_dict_passes_through -xvs"}, {"test_file": "tests/execution/test_jobs.py", "test_function": "TestNormalizeOutputItem.test_other_types_return_none", "test_content": "\"\"\"Unit tests for comfy_execution/jobs.py\"\"\"\n\nfrom comfy_execution.jobs import (\n JobStatus,\n is_previewable,\n normalize_queue_item,\n normalize_history_item,\n normalize_output_item,\n normalize_outputs,\n get_outputs_summary,\n apply_sorting,\n has_3d_extension,\n)\n\n\nclass TestJobStatus:\n \"\"\"Test JobStatus constants.\"\"\"\n\n def test_status_values(self):\n \"\"\"Status constants should have expected string values.\"\"\"\n assert JobStatus.PENDING == 'pending'\n assert JobStatus.IN_PROGRESS == 'in_progress'\n assert JobStatus.COMPLETED == 'completed'\n assert JobStatus.FAILED == 'failed'\n assert 
JobStatus.CANCELLED == 'cancelled'\n\n def test_all_contains_all_statuses(self):\n \"\"\"ALL should contain all status values.\"\"\"\n assert JobStatus.PENDING in JobStatus.ALL\n assert JobStatus.IN_PROGRESS in JobStatus.ALL\n assert JobStatus.COMPLETED in JobStatus.ALL\n assert JobStatus.FAILED in JobStatus.ALL\n assert JobStatus.CANCELLED in JobStatus.ALL\n assert len(JobStatus.ALL) == 5\n\n\nclass TestIsPreviewable:\n \"\"\"Unit tests for is_previewable()\"\"\"\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, audio, 3d, text media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio', '3d', 'text']:\n assert is_previewable(media_type, {}) is True\n\n def test_non_previewable_media_types(self):\n \"\"\"Other media types should not be previewable.\"\"\"\n for media_type in ['latents', 'metadata', 'files']:\n assert is_previewable(media_type, {}) is False\n\n def test_3d_extensions_previewable(self):\n \"\"\"3D file extensions should be previewable regardless of media_type.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n item = {'filename': f'model{ext}'}\n assert is_previewable('files', item) is True\n\n def test_3d_extensions_case_insensitive(self):\n \"\"\"3D extension check should be case insensitive.\"\"\"\n item = {'filename': 'MODEL.GLB'}\n assert is_previewable('files', item) is True\n\n def test_video_format_previewable(self):\n \"\"\"Items with video/ format should be previewable.\"\"\"\n item = {'format': 'video/mp4'}\n assert is_previewable('files', item) is True\n\n def test_audio_format_previewable(self):\n \"\"\"Items with audio/ format should be previewable.\"\"\"\n item = {'format': 'audio/wav'}\n assert is_previewable('files', item) is True\n\n def test_other_format_not_previewable(self):\n \"\"\"Items with other format should not be previewable.\"\"\"\n item = {'format': 'application/json'}\n assert is_previewable('files', item) is False\n\n\nclass TestGetOutputsSummary:\n \"\"\"Unit 
tests for get_outputs_summary()\"\"\"\n\n def test_empty_outputs(self):\n \"\"\"Empty outputs should return 0 count and None preview.\"\"\"\n count, preview = get_outputs_summary({})\n assert count == 0\n assert preview is None\n\n def test_counts_across_multiple_nodes(self):\n \"\"\"Outputs from multiple nodes should all be counted.\"\"\"\n outputs = {\n 'node1': {'images': [{'filename': 'a.png', 'type': 'output'}]},\n 'node2': {'images': [{'filename': 'b.png', 'type': 'output'}]},\n 'node3': {'images': [\n {'filename': 'c.png', 'type': 'output'},\n {'filename': 'd.png', 'type': 'output'}\n ]}\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 4\n\n def test_skips_animated_key_and_non_list_values(self):\n \"\"\"The 'animated' key and non-list values should be skipped.\"\"\"\n outputs = {\n 'node1': {\n 'images': [{'filename': 'test.png', 'type': 'output'}],\n 'animated': [True], # Should skip due to key name\n 'metadata': 'string', # Should skip due to non-list\n 'count': 42 # Should skip due to non-list\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n\n def test_preview_prefers_type_output(self):\n \"\"\"Items with type='output' should be preferred for preview.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp.png', 'type': 'temp'},\n {'filename': 'output.png', 'type': 'output'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview['filename'] == 'output.png'\n\n def test_preview_fallback_when_no_output_type(self):\n \"\"\"If no type='output', should use first previewable.\"\"\"\n outputs = {\n 'node1': {\n 'images': [\n {'filename': 'temp1.png', 'type': 'temp'},\n {'filename': 'temp2.png', 'type': 'temp'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['filename'] == 'temp1.png'\n\n def test_non_previewable_media_types_counted_but_no_preview(self):\n \"\"\"Non-previewable media types should be counted but not used as 
preview.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [\n {'filename': 'latent1.safetensors'},\n {'filename': 'latent2.safetensors'}\n ]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 2\n assert preview is None\n\n def test_previewable_media_types(self):\n \"\"\"Images, video, and audio media types should be previewable.\"\"\"\n for media_type in ['images', 'video', 'audio']:\n outputs = {\n 'node1': {\n media_type: [{'filename': 'test.file', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"{media_type} should be previewable\"\n\n def test_3d_files_previewable(self):\n \"\"\"3D file extensions should be previewable.\"\"\"\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n outputs = {\n 'node1': {\n 'files': [{'filename': f'model{ext}', 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"3D file {ext} should be previewable\"\n\n def test_format_mime_type_previewable(self):\n \"\"\"Files with video/ or audio/ format should be previewable.\"\"\"\n for fmt in ['video/x-custom', 'audio/x-custom']:\n outputs = {\n 'node1': {\n 'files': [{'filename': 'file.custom', 'format': fmt, 'type': 'output'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None, f\"Format {fmt} should be previewable\"\n\n def test_preview_enriched_with_node_metadata(self):\n \"\"\"Preview should include nodeId, mediaType, and original fields.\"\"\"\n outputs = {\n 'node123': {\n 'images': [{'filename': 'test.png', 'type': 'output', 'subfolder': 'outputs'}]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview['nodeId'] == 'node123'\n assert preview['mediaType'] == 'images'\n assert preview['subfolder'] == 'outputs'\n\n def test_string_3d_filename_creates_preview(self):\n \"\"\"String items with 3D extensions should synthesize a preview (Preview3D node output).\n Only the .glb counts — nulls and 
non-file strings are excluded.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 1\n assert preview is not None\n assert preview['filename'] == 'preview3d_abc123.glb'\n assert preview['mediaType'] == '3d'\n assert preview['nodeId'] == 'node1'\n assert preview['type'] == 'output'\n\n def test_string_non_3d_filename_no_preview(self):\n \"\"\"String items without 3D extensions should not create a preview.\"\"\"\n outputs = {\n 'node1': {\n 'result': ['data.json', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert count == 0\n assert preview is None\n\n def test_string_3d_filename_used_as_fallback(self):\n \"\"\"String 3D preview should be used when no dict items are previewable.\"\"\"\n outputs = {\n 'node1': {\n 'latents': [{'filename': 'latent.safetensors'}],\n },\n 'node2': {\n 'result': ['model.glb', None]\n }\n }\n count, preview = get_outputs_summary(outputs)\n assert preview is not None\n assert preview['filename'] == 'model.glb'\n assert preview['mediaType'] == '3d'\n\n\nclass TestHas3DExtension:\n \"\"\"Unit tests for has_3d_extension()\"\"\"\n\n def test_recognized_extensions(self):\n for ext in ['.obj', '.fbx', '.gltf', '.glb', '.usdz']:\n assert has_3d_extension(f'model{ext}') is True\n\n def test_case_insensitive(self):\n assert has_3d_extension('MODEL.GLB') is True\n assert has_3d_extension('Scene.GLTF') is True\n\n def test_non_3d_extensions(self):\n for name in ['photo.png', 'video.mp4', 'data.json', 'model']:\n assert has_3d_extension(name) is False\n\n\nclass TestApplySorting:\n \"\"\"Unit tests for apply_sorting()\"\"\"\n\n def test_sort_by_create_time_desc(self):\n \"\"\"Default sort by create_time descending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'desc')\n assert [j['id'] for j in result] == 
['b', 'c', 'a']\n\n def test_sort_by_create_time_asc(self):\n \"\"\"Sort by create_time ascending.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100},\n {'id': 'b', 'create_time': 300},\n {'id': 'c', 'create_time': 200},\n ]\n result = apply_sorting(jobs, 'created_at', 'asc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_by_execution_duration(self):\n \"\"\"Sort by execution_duration should order by duration.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100}, # 5s\n {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300}, # 1s\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200}, # 3s\n ]\n result = apply_sorting(jobs, 'execution_duration', 'desc')\n assert [j['id'] for j in result] == ['a', 'c', 'b']\n\n def test_sort_with_none_values(self):\n \"\"\"Jobs with None values should sort as 0.\"\"\"\n jobs = [\n {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},\n {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},\n {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},\n ]\n result = apply_sorting(jobs, 'execution_duration', 'asc')\n assert result[0]['id'] == 'b' # None treated as 0, comes first\n\n\nclass TestNormalizeQueueItem:\n \"\"\"Unit tests for normalize_queue_item()\"\"\"\n\n def test_basic_normalization(self):\n \"\"\"Queue item should be normalized to job dict.\"\"\"\n item = (\n 10, # priority/number\n 'prompt-123', # prompt_id\n {'nodes': {}}, # prompt\n {\n 'create_time': 1234567890,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-abc'}}\n }, # extra_data\n ['node1'], # outputs_to_execute\n )\n job = normalize_queue_item(item, JobStatus.PENDING)\n\n assert job['id'] == 'prompt-123'\n assert job['status'] == 'pending'\n assert job['priority'] == 10\n assert job['create_time'] == 
1234567890\n assert 'execution_start_time' not in job\n assert 'execution_end_time' not in job\n assert 'execution_error' not in job\n assert 'preview_output' not in job\n assert job['outputs_count'] == 0\n assert job['workflow_id'] == 'workflow-abc'\n\n\nclass TestNormalizeHistoryItem:\n \"\"\"Unit tests for normalize_history_item()\"\"\"\n\n def test_completed_job(self):\n \"\"\"Completed history item should have correct status and times from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5, # priority\n 'prompt-456',\n {'nodes': {}},\n {\n 'create_time': 1234567890000,\n 'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}\n },\n ['node1'],\n ),\n 'status': {\n 'status_str': 'success',\n 'completed': True,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),\n ('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),\n ]\n },\n 'outputs': {},\n }\n job = normalize_history_item('prompt-456', history_item)\n\n assert job['id'] == 'prompt-456'\n assert job['status'] == 'completed'\n assert job['priority'] == 5\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567893000\n assert job['workflow_id'] == 'workflow-xyz'\n\n def test_failed_job(self):\n \"\"\"Failed history item should have failed status and error from messages.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-789',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),\n ('execution_error', {\n 'prompt_id': 'prompt-789',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'exception_message': 'CUDA out of memory',\n 'exception_type': 'RuntimeError',\n 'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = 
normalize_history_item('prompt-789', history_item)\n assert job['status'] == 'failed'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n assert job['execution_error']['node_id'] == '5'\n assert job['execution_error']['node_type'] == 'KSampler'\n assert job['execution_error']['exception_message'] == 'CUDA out of memory'\n\n def test_cancelled_job(self):\n \"\"\"Cancelled/interrupted history item should have cancelled status.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-cancelled',\n {'nodes': {}},\n {'create_time': 1234567890000},\n ['node1'],\n ),\n 'status': {\n 'status_str': 'error',\n 'completed': False,\n 'messages': [\n ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),\n ('execution_interrupted', {\n 'prompt_id': 'prompt-cancelled',\n 'node_id': '5',\n 'node_type': 'KSampler',\n 'executed': ['1', '2', '3'],\n 'timestamp': 1234567891000,\n })\n ]\n },\n 'outputs': {},\n }\n\n job = normalize_history_item('prompt-cancelled', history_item)\n assert job['status'] == 'cancelled'\n assert job['execution_start_time'] == 1234567890500\n assert job['execution_end_time'] == 1234567891000\n # Cancelled jobs should not have execution_error set\n assert 'execution_error' not in job\n\n def test_include_outputs(self):\n \"\"\"When include_outputs=True, should include full output data.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-123',\n {'nodes': {'1': {}}},\n {'create_time': 1234567890, 'client_id': 'abc'},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},\n }\n job = normalize_history_item('prompt-123', history_item, include_outputs=True)\n\n assert 'outputs' in job\n assert 'workflow' in job\n assert 'execution_status' in job\n assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}\n assert job['workflow'] == {\n 'prompt': {'nodes': {'1': 
{}}},\n 'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},\n }\n\n def test_include_outputs_normalizes_3d_strings(self):\n \"\"\"Detail view should transform string 3D filenames into file output dicts.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-3d',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'result': ['preview3d_abc123.glb', None, None]\n }\n },\n }\n job = normalize_history_item('prompt-3d', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n result_items = job['outputs']['node1']['result']\n assert len(result_items) == 1\n assert result_items[0] == {\n 'filename': 'preview3d_abc123.glb',\n 'type': 'output',\n 'subfolder': '',\n 'mediaType': '3d',\n }\n\n def test_include_outputs_preserves_dict_items(self):\n \"\"\"Detail view normalization should pass dict items through unchanged.\"\"\"\n history_item = {\n 'prompt': (\n 5,\n 'prompt-img',\n {'nodes': {}},\n {'create_time': 1234567890},\n ['node1'],\n ),\n 'status': {'status_str': 'success', 'completed': True, 'messages': []},\n 'outputs': {\n 'node1': {\n 'images': [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n }\n },\n }\n job = normalize_history_item('prompt-img', history_item, include_outputs=True)\n\n assert job['outputs_count'] == 1\n assert job['outputs']['node1']['images'] == [\n {'filename': 'photo.png', 'type': 'output', 'subfolder': ''},\n ]\n\n\nclass TestNormalizeOutputItem:\n \"\"\"Unit tests for normalize_output_item()\"\"\"\n\n def test_none_returns_none(self):\n assert normalize_output_item(None) is None\n\n def test_string_3d_extension_synthesizes_dict(self):\n result = normalize_output_item('model.glb')\n assert result == {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'}\n\n def test_string_non_3d_extension_returns_none(self):\n assert normalize_output_item('data.json') 
is None\n\n def test_string_no_extension_returns_none(self):\n assert normalize_output_item('camera_info_string') is None\n\n def test_dict_passes_through(self):\n item = {'filename': 'test.png', 'type': 'output'}\n assert normalize_output_item(item) is item\n\n def test_other_types_return_none(self):\n assert normalize_output_item(42) is None\n assert normalize_output_item(True) is None\n\n\nclass TestNormalizeOutputs:\n \"\"\"Unit tests for normalize_outputs()\"\"\"\n\n def test_empty_outputs(self):\n assert normalize_outputs({}) == {}\n\n def test_dict_items_pass_through(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n }\n }\n result = normalize_outputs(outputs)\n assert result == outputs\n\n def test_3d_string_synthesized(self):\n outputs = {\n 'node1': {\n 'result': ['model.glb', None, None],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': [\n {'filename': 'model.glb', 'type': 'output', 'subfolder': '', 'mediaType': '3d'},\n ],\n }\n }\n\n def test_animated_key_preserved(self):\n outputs = {\n 'node1': {\n 'images': [{'filename': 'a.png', 'type': 'output'}],\n 'animated': [True],\n }\n }\n result = normalize_outputs(outputs)\n assert result['node1']['animated'] == [True]\n\n def test_non_dict_node_outputs_preserved(self):\n outputs = {'node1': 'unexpected_value'}\n result = normalize_outputs(outputs)\n assert result == {'node1': 'unexpected_value'}\n\n def test_none_items_filtered_but_other_types_preserved(self):\n outputs = {\n 'node1': {\n 'result': ['data.json', None, [1, 2, 3]],\n }\n }\n result = normalize_outputs(outputs)\n assert result == {\n 'node1': {\n 'result': ['data.json', [1, 2, 3]],\n }\n }\n", "framework": "pytest", "test_command": "pytest tests/execution/test_jobs.py::TestNormalizeOutputItem::test_other_types_return_none -xvs"}] | {"repo_url": "https://github.com/Comfy-Org/ComfyUI", "install_cmd": "pip install -e .", "commit_sha": 
"dff0a4a15887383c90a031e3fd48ebc41f6928e7", "frozen_requirements": "frozen_requirements/Comfy-Org_ComfyUI.txt"} | {"body_lines": 9, "file_lines": 390, "has_docstring": true, "num_tests": 6} | {"status": "passed", "tests_run": 6} |
repo_patch/0002 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | normalize_queue_item | normalize_queue_item | function | null | "\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED) | "def normalize_queue_item(item: tuple, status: str) -> dict:\n \"\"\"Convert queue item tuple to (...TRUNCATED) | "Convert queue item tuple to unified job dict.\n\nExpects item with sensitive data already removed ((...TRUNCATED) | " priority, prompt_id, _, extra_data, _ = item\n create_time, workflow_id = _extract_job_metad(...TRUNCATED) | "def normalize_queue_item(item: tuple, status: str) -> dict:\n \"\"\"Convert queue item tuple to (...TRUNCATED) | "[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestNormalizeQueueItem.test(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 10, "file_lines": 390, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} |
repo_patch/0003 | Comfy-Org/ComfyUI | comfy_api/feature_flags.py | get_connection_feature | get_connection_feature | function | null | "\"\"\"\nFeature flags module for ComfyUI WebSocket protocol negotiation.\n\nThis module handles cap(...TRUNCATED) | "def get_connection_feature(\n sockets_metadata: dict[str, dict[str, Any]],\n sid: str,\n f(...TRUNCATED) | "Get a feature flag value for a specific connection.\n\nArgs:\n sockets_metadata: Dictionary of s(...TRUNCATED) | " if sid not in sockets_metadata:\n return default\n\n return sockets_metadata[sid].get(...TRUNCATED) | "def get_connection_feature(\n sockets_metadata: dict[str, dict[str, Any]],\n sid: str,\n f(...TRUNCATED) | "[{\"test_file\": \"tests-unit/feature_flags_test.py\", \"test_function\": \"TestFeatureFlags.test_g(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 3, "file_lines": 72, "has_docstring": true, "num_tests": 5} | {"status": "passed", "tests_run": 5} |
repo_patch/0004 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | apply_sorting | apply_sorting | function | null | "\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED) | "def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:\n \"\"\"Sort j(...TRUNCATED) | Sort jobs list by specified field and order. | " reverse = (sort_order == 'desc')\n\n if sort_by == 'execution_duration':\n def get_so(...TRUNCATED) | "def apply_sorting(jobs: list[dict], sort_by: str, sort_order: str) -> list[dict]:\n \"\"\"Sort j(...TRUNCATED) | "[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestApplySorting.test_sort_(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 10, "file_lines": 390, "has_docstring": true, "num_tests": 4} | {"status": "passed", "tests_run": 4} |
repo_patch/0005 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | get_outputs_summary | get_outputs_summary | function | null | "\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED) | "def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:\n \"\"\"\n Count output(...TRUNCATED) | "Count outputs and find preview in a single pass.\nReturns (outputs_count, preview_output).\n\nPrevi(...TRUNCATED) | " count = 0\n preview_output = None\n fallback_preview = None\n\n for node_id, node_outp(...TRUNCATED) | "def get_outputs_summary(outputs: dict) -> tuple[int, Optional[dict]]:\n \"\"\"\n Count output(...TRUNCATED) | "[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestGetOutputsSummary.test_(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 49, "file_lines": 390, "has_docstring": true, "num_tests": 13} | {"status": "passed", "tests_run": 13} |
repo_patch/0006 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | normalize_outputs | normalize_outputs | function | null | "\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED) | "def normalize_outputs(outputs: dict) -> dict:\n \"\"\"Normalize raw node outputs for the jobs AP(...TRUNCATED) | "Normalize raw node outputs for the jobs API.\n\nTransforms string 3D filenames into file output dic(...TRUNCATED) | " normalized = {}\n for node_id, node_outputs in outputs.items():\n if not isinstance(n(...TRUNCATED) | "def normalize_outputs(outputs: dict) -> dict:\n \"\"\"Normalize raw node outputs for the jobs AP(...TRUNCATED) | "[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestNormalizeOutputs.test_e(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 19, "file_lines": 390, "has_docstring": true, "num_tests": 6} | {"status": "passed", "tests_run": 6} |
repo_patch/0007 | Comfy-Org/ComfyUI | comfy_execution/jobs.py | is_previewable | is_previewable | function | null | "\"\"\"\nJob utilities for the /api/jobs endpoint.\nProvides normalization and helper functions for (...TRUNCATED) | "def is_previewable(media_type: str, item: dict) -> bool:\n \"\"\"\n Check if an output item i(...TRUNCATED) | "Check if an output item is previewable.\nMatches frontend logic in ComfyUI_frontend/src/stores/queu(...TRUNCATED) | " if media_type in PREVIEWABLE_MEDIA_TYPES:\n return True\n\n # Check format field (MIM(...TRUNCATED) | "def is_previewable(media_type: str, item: dict) -> bool:\n \"\"\"\n Check if an output item i(...TRUNCATED) | "[{\"test_file\": \"tests/execution/test_jobs.py\", \"test_function\": \"TestIsPreviewable.test_prev(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 12, "file_lines": 390, "has_docstring": true, "num_tests": 7} | {"status": "passed", "tests_run": 7} |
repo_patch/0008 | Comfy-Org/ComfyUI | middleware/cache_middleware.py | cache_control | cache_control | function | null | "\"\"\"Cache control middleware for ComfyUI server\"\"\"\n\nfrom aiohttp import web\nfrom typing imp(...TRUNCATED) | "async def cache_control(\n request: web.Request, handler: Callable[[web.Request], Awaitable[web.(...TRUNCATED) | "Cache control middleware that sets appropriate cache headers based on file type and response status(...TRUNCATED) | " response: web.Response = await handler(request)\n\n path_filename = request.path.rsplit(\"/\(...TRUNCATED) | "async def cache_control(\n request: web.Request, handler: Callable[[web.Request], Awaitable[web.(...TRUNCATED) | "[{\"test_file\": \"tests-unit/server_test/test_cache_control.py\", \"test_function\": \"TestCacheCo(...TRUNCATED) | "{\"repo_url\": \"https://github.com/Comfy-Org/ComfyUI\", \"install_cmd\": \"pip install -e .\", \"c(...TRUNCATED) | {"body_lines": 22, "file_lines": 54, "has_docstring": true, "num_tests": 9} | {"status": "passed", "tests_run": 9} |
repo_patch/0009 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_base_model | _get_whisper_base_model | function | null | "import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED) | "def _get_whisper_base_model():\n \"\"\"\n Get the best Whisper Base model for the current har(...TRUNCATED) | "Get the best Whisper Base model for the current hardware.\n\nAutomatically selects MLX Whisper Base(...TRUNCATED) | " try:\n import torch\n\n has_mps = torch.backends.mps.is_built() and torch.backend(...TRUNCATED) | "def _get_whisper_base_model():\n \"\"\"\n Get the best Whisper Base model for the current har(...TRUNCATED) | "[{\"test_file\": \"tests/test_asr_mlx_whisper.py\", \"test_function\": \"TestMlxWhisperIntegration.(...TRUNCATED) | "{\"repo_url\": \"https://github.com/docling-project/docling\", \"install_cmd\": \"pip install -e .\(...TRUNCATED) | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 2} | {"status": "passed", "tests_run": 2} |
repo_patch/0010 | docling-project/docling | docling/datamodel/asr_model_specs.py | _get_whisper_tiny_model | _get_whisper_tiny_model | function | null | "import logging\nfrom enum import Enum\n\nfrom pydantic import (\n AnyUrl,\n)\n\nfrom docling.dat(...TRUNCATED) | "def _get_whisper_tiny_model():\n \"\"\"\n Get the best Whisper Tiny model for the current har(...TRUNCATED) | "Get the best Whisper Tiny model for the current hardware.\n\nAutomatically selects MLX Whisper Tiny(...TRUNCATED) | " try:\n import torch\n\n has_mps = torch.backends.mps.is_built() and torch.backend(...TRUNCATED) | "def _get_whisper_tiny_model():\n \"\"\"\n Get the best Whisper Tiny model for the current har(...TRUNCATED) | "[{\"test_file\": \"tests/test_asr_mlx_whisper.py\", \"test_function\": \"TestMlxWhisperIntegration.(...TRUNCATED) | "{\"repo_url\": \"https://github.com/docling-project/docling\", \"install_cmd\": \"pip install -e .\(...TRUNCATED) | {"body_lines": 34, "file_lines": 495, "has_docstring": true, "num_tests": 1} | {"status": "passed", "tests_run": 1} |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 9