| | import re |
| | import json |
| | import os |
| |
|
def parse_json(json_str):
    """Parse a JSON string, tolerating markdown code fences and control characters.

    Tries, in order:
      1. Plain ``json.loads`` on the raw input.
      2. Extracting the body of a ```json ... ``` fenced block (common in LLM
         output) and parsing that.
      3. Stripping ASCII control characters (keeping \\t, \\n, \\r) and parsing
         the cleaned string.

    Args:
        json_str: Raw string expected to contain JSON.

    Returns:
        The parsed Python object.

    Raises:
        json.JSONDecodeError: if every attempt fails; diagnostic context is
        printed before re-raising.
    """
    try:
        return json.loads(json_str)
    # json.JSONDecodeError is a ValueError subclass; narrowed from the
    # original over-broad `except Exception`.
    except ValueError as e1:
        # The payload may be wrapped in a markdown ```json fence.
        json_match = re.search(r'```json\n(.*?)\n```', json_str, re.DOTALL)
        if json_match:
            json_str = json_match.group(1)
            try:
                return json.loads(json_str)
            except ValueError:
                pass

        # Remove ASCII control characters (except \t, \n, \r) that make strict
        # JSON parsing fail.
        cleaned_str = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]', '', json_str)

        try:
            return json.loads(cleaned_str)
        except ValueError as e2:
            print(f"DEBUG: Failed to parse JSON. Original error: {e1}")
            print(f"DEBUG: After cleanup error: {e2}")
            print(f"DEBUG: Original JSON string (first 500 chars): {repr(json_str[:500])}")
            print(f"DEBUG: Cleaned JSON string (first 500 chars): {repr(cleaned_str[:500])}")
            # Bare `raise` preserves the original traceback (was `raise e2`).
            raise
| |
|
def _edit_distance(a, b):
    """Levenshtein distance (insert/delete/substitute, unit cost), O(len(a)*len(b))."""
    if len(a) < len(b):
        a, b = b, a  # keep the inner row as the shorter string
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(prev[j] + 1,            # deletion
                            curr[j - 1] + 1,        # insertion
                            prev[j - 1] + (ca != cb)))  # substitution
        prev = curr
    return prev[-1]


def most_similar_string(prompt, string_list) -> dict:
    """Return the entry of string_list whose "Prompt" text is closest to prompt.

    Closeness is Levenshtein edit distance; ties resolve to the earliest entry.

    NOTE: the original called `Levenshtein.distance`, but the `Levenshtein`
    package was never imported (NameError at runtime); replaced with the
    pure-stdlib helper above, which computes the same metric.

    Args:
        prompt: query string.
        string_list: list of dicts, each with a "Prompt" key.

    Returns:
        The dict from string_list with minimal distance to prompt.
    """
    similarities = [_edit_distance(prompt, item["Prompt"]) for item in string_list]
    most_similar_idx = similarities.index(min(similarities))
    return string_list[most_similar_idx]
| |
|
| |
|
def check_and_fix_prompt(chosed_prompts, prompt_list) -> dict:
    """Snap each chosen prompt onto its closest known prompt.

    For every (key, item) in chosed_prompts, the item's "Prompt" text is
    matched against prompt_list via most_similar_string; the matched entry is
    tagged with the item's "Thought" and stored under the same key.

    NOTE(review): the matched dict is mutated in place, so entries of
    prompt_list gain/overwrite a "Thought" key as a side effect.

    Args:
        chosed_prompts: mapping of key -> {"Prompt": ..., "Thought": ...}.
        prompt_list: list of candidate dicts, each with a "Prompt" key.

    Returns:
        Dict with the same keys as chosed_prompts, values taken from prompt_list.
    """
    fixed = {}
    for key, chosen in chosed_prompts.items():
        matched = most_similar_string(chosen["Prompt"], prompt_list)
        matched["Thought"] = chosen["Thought"]
        fixed[key] = matched
    return fixed
| |
|
| |
|
def format_dimension_as_string(df, dimension_name) -> str:
    """Render one row of a rating table as a single descriptive string.

    df must have a "Dimension" column plus "Very High", "High", "Moderate",
    "Low" and "Very Low" columns. Dimension values are normalized in place to
    lower snake_case before lookup (the df argument is mutated).

    Args:
        df: pandas DataFrame holding the dimension table.
        dimension_name: normalized name to look up, e.g. "multiple_objects".

    Returns:
        "name: Very High -> ..., High -> ..., ..." for the matching row, or a
        "No data found..." message when the dimension is absent.
    """
    # BUG FIX: the original used Series.replace(" ", "_"), which only replaces
    # cells whose *entire* value is " "; .str.replace substitutes within each
    # string, so "Multiple Objects" becomes "multiple_objects" as intended.
    df["Dimension"] = df["Dimension"].str.lower().str.replace(" ", "_", regex=False)
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"

    formatted_string = (
        f"{row['Dimension'].values[0]}: "
        f"Very High -> {row['Very High'].values[0]}, "
        f"High -> {row['High'].values[0]}, "
        f"Moderate -> {row['Moderate'].values[0]}, "
        f"Low -> {row['Low'].values[0]}, "
        f"Very Low -> {row['Very Low'].values[0]}"
    )

    return formatted_string
| |
|
def extract_between_tags(text, tag) -> str:
    """Return the text enclosed by <tag>...</tag>, or None when absent.

    The match is non-greedy and spans newlines (DOTALL), so only the first
    tagged region is returned.
    """
    found = re.search(f'<{tag}>(.*?)</{tag}>', text, re.DOTALL)
    if found is None:
        return None
    return found.group(1)
| |
|
| |
|
def format_plans(plans_str) -> dict:
    """Split a planner response string into its tagged components.

    Example input:
        "<think>...reasoning about the sub-aspect...</think><tool>Multiple Objects</tool>"

    Returns:
        {"thought": ..., "summary": ...} when a </summary> tag is present,
        otherwise {"thought": ..., "tool": ...}. A missing tag yields a None
        value (extract_between_tags returns None on no match).
    """
    if '</summary>' in plans_str:
        fields = (("thought", "think"), ("summary", "summary"))
    else:
        fields = (("thought", "think"), ("tool", "tool"))
    return {key: extract_between_tags(plans_str, tag) for key, tag in fields}
| |
|
def save_json(content, file_path):
    """Serialize content as pretty-printed JSON to file_path.

    Opens the file with an explicit UTF-8 encoding so output does not depend
    on the platform's locale default, then reports the absolute path written.

    Args:
        content: any json.dump-serializable object.
        file_path: destination path (overwritten if it exists).
    """
    with open(file_path, 'w', encoding='utf-8') as json_file:
        json.dump(content, json_file, indent=4)
    print(f"Results are saved to {os.path.abspath(file_path)}")
| |
|
def tool_existence(tool_name):
    """Resolve a free-form tool name to a canonical snake_case tool id.

    The input is lowercased and underscores become spaces; the first known
    tool whose name appears as a substring wins (first match in declaration
    order). Returns e.g. "multiple_objects", or None when nothing matches.
    """
    known_tools = (
        "subject consistency", "background consistency", "motion smoothness",
        "aesthetic quality", "imaging quality", "appearance style",
        "temporal style", "overall consistency", "multiple objects",
        "object class", "dynamic degree", "human action", "color",
        "spatial relationship", "scene",
    )
    normalized = tool_name.lower().replace("_", " ")
    for candidate in known_tools:
        if candidate in normalized:
            return candidate.replace(" ", "_")
    return None
| |
|
def compute_score(pred, gt):
    """Score a prediction against a ground-truth letter answer.

    The answer is extracted from pred via parse_answer (defined elsewhere in
    this module — TODO confirm its contract); the first uppercase ASCII letter
    in the answer is compared case-insensitively against gt.

    Args:
        pred: raw model prediction passed to parse_answer.
        gt: ground-truth answer string (e.g. "A").

    Returns:
        1.0 on a match, 0.0 otherwise (no parsed answer, or no uppercase
        letter found in it).
    """
    score = 0.
    answer = parse_answer(pred)

    if answer is None:
        return score

    # First uppercase letter, optionally surrounded by whitespace.
    match = re.search(r"\s*([A-Z])\s*", answer, re.DOTALL)
    try:
        # match may be None (no uppercase letter) -> AttributeError keeps the
        # score at 0; the original bare `except:` is narrowed to the one
        # exception actually expected here.
        if match.group(1).strip().lower() == gt.strip().lower():
            score = 1.
    except AttributeError:
        pass

    return score
| |
|
| | |