import os
import sys
import json
import glob
import argparse
import random
import datetime

import pandas as pd

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from eval_agent.system_prompts import sys_prompts

# Reference tables mapping each evaluation dimension to qualitative score bands.
vbench_dimension_df = pd.read_csv("eval_agent/vbench_dimension_scores.tsv", sep="\t")
t2i_dimension_df = pd.read_csv("eval_agent/t2i_dimension_scores.tsv", sep="\t")
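# Both TSVs are assumed to share the column layout consumed by
# format_dimension_as_string below: one row per dimension with columns
# "Dimension", "Very High", "High", "Moderate", "Low", and "Very Low".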

# Alpaca-style record skeleton; each string field is filled in by format_template.
alpaca_template = {
    "instruction": "{instruction}",
    "input": "{input}",
    "output": "{output}",
    "system": "{system}"
}

# Tags used to serialize each step of the agent trajectory.
thinking_template = "<think>{thinking}</think>"
tool_template = "<tool>{tool}</tool>"
observation_template = "<information>{information}</information>"
analysis_template = "<analysis>{analysis}</analysis>"
summary_template = "<summary>{summary}</summary>"
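
# Illustrative assembly (all values invented): convert_to_alpaca() below emits an
# intermediate assistant turn as
#   <think>... I will evaluate the model's sub-aspect: motion smoothness.</think><tool>motion_smoothness</tool>
# followed by an <information>...</information> observation, and the final turn as
#   <think>...</think><summary>Analysis: ...\nSummary: ...</summary>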

global_counter = 0  # Alpaca records emitted across all input files
length_counter = 0  # currently unused


def format_subaspect(sub_aspect):
    """Phrase the chosen sub-aspect as a natural-language sentence (randomly templated)."""
    template_list = [
        "I will evaluate the model's sub-aspect: {sub_aspect}.",
        "I will focus on the {sub_aspect} sub-aspect of the model.",
        "Let me assess the {sub_aspect} sub-aspect of this model.",
        "I need to examine the model's {sub_aspect} sub-aspect.",
        "Now I will analyze the {sub_aspect} sub-aspect dimension.",
        "I'll investigate the {sub_aspect} sub-aspect quality of the model.",
        "Time to evaluate the {sub_aspect} sub-aspect performance.",
        "I should check the model's {sub_aspect} sub-aspect capabilities."
    ]
    selected_template = random.choice(template_list)
    return selected_template.format(sub_aspect=sub_aspect)


def format_summary(analysis, summary):
    """Combine the analysis and summary into a single string."""
    return f"Analysis: {analysis}\nSummary: {summary}"


def load_data(file_path):
    """Load JSON data from a file."""
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)


def format_template(template, **kwargs):
    """Format a template (a dict of strings or a single string) with the provided values."""
    if isinstance(template, dict):
        result = {}
        for key, value in template.items():
            if isinstance(value, str):
                result[key] = value.format(**kwargs)
            else:
                result[key] = value
        return result
    return template.format(**kwargs)
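
# Illustrative usage (argument values invented):
#   format_template(alpaca_template, instruction="Evaluate this model", input="",
#                   output="<think>...</think>", system="You are ...")
# returns a new dict with every string field formatted; non-string values are
# copied through unchanged.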


def extract_obs(tool_name, obs):
    """Wrap a raw tool observation; tool_name is accepted but currently unused."""
    return f"Observation: {obs}"


def check_data(data):
    """Reject trajectories with more than 8 steps or whose final answer misses the ground truth."""
    if len(data["cot"]) > 8:
        return False
    if data["cot"][-1]["answer"] != data["ground_truth"]:
        return False
    return True
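
# check_data expects records shaped like (field values illustrative):
#   {"cot": [{"answer": ...}, ...], "ground_truth": ...}
# Note that extract_obs and check_data are helpers not yet invoked in this script.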


def format_dimension_as_string(df, dimension_name):
    """Render one dimension's scoring bands from a reference table as a single line."""
    row = df.loc[df['Dimension'] == dimension_name]
    if row.empty:
        return f"No data found for dimension: {dimension_name}"

    formatted_string = (
        f"{row['Dimension'].values[0]}: "
        f"Very High -> {row['Very High'].values[0]}, "
        f"High -> {row['High'].values[0]}, "
        f"Moderate -> {row['Moderate'].values[0]}, "
        f"Low -> {row['Low'].values[0]}, "
        f"Very Low -> {row['Very Low'].values[0]}"
    )

    return formatted_string
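
# Example output (dimension name and thresholds invented for illustration):
#   "motion_smoothness: Very High -> >0.99, High -> 0.97~0.99, Moderate -> 0.95~0.97, Low -> 0.90~0.95, Very Low -> <0.90"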


def format_eval_results(results, reference_table):
    """Format one tool's evaluation results together with its scoring reference table."""
    tool_name = results["Tool"]
    average_score = results["eval_results"]["score"][0]
    video_results = results["eval_results"]["score"][1]

    output = f"Scoring Reference Table of '{tool_name}': {reference_table}\n\n"
    output += "Results:\n"
    output += f"- Overall score: {average_score:.4f}\n"
    output += "- Per-prompt scores:\n"

    for video in video_results:
        prompt = video["prompt"]
        score = video["video_results"]
        output += f"  • \"{prompt}\": {score:.4f}\n"

    return output
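
# Expected input shape (keys as read above; values invented):
#   results = {"Tool": "motion_smoothness",
#              "eval_results": {"score": [0.9731,
#                  [{"prompt": "a cat running", "video_results": 0.9812}, ...]]}}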


def convert_to_alpaca(json_path, output_dir, return_data=False):
    """Convert one agent trajectory into Alpaca-format training records."""
    global global_counter
    data_list = []

    with open(json_path, "r", encoding="utf-8") as in_f:
        data = json.load(in_f)

    # Drop the trailing element; the loop below treats data[0] as the user
    # query and data[-1] as the final summary step.
    data.pop()

    system_prompt = sys_prompts["eval-agent-vbench-training-sys_v1"] + sys_prompts["eval-agent-format-sys"]

    ops = []  # ops[k]: serialized assistant turn for step k+1
    obs = []  # obs[k]: serialized tool observation following step k+1

    for i in range(1, len(data)):
        try:
            if i == len(data) - 1:
                # Final step: thinking plus the closing summary; there is no
                # tool call, so no observation is produced.
                op = f"{thinking_template.format(thinking=data[i]['Thought'])}{summary_template.format(summary=format_summary(data[i]['Analysis'], data[i]['Summary']))}"
            else:
                # Intermediate step: thinking plus a tool call, followed by the
                # tool's scores and the matching scoring reference table.
                op = f"{thinking_template.format(thinking=data[i]['Thought'] + ' ' + format_subaspect(data[i]['Sub-aspect']))}{tool_template.format(tool=data[i]['Tool'])}"
                reference_table = format_dimension_as_string(vbench_dimension_df, data[i]['Tool'])
                obs.append(observation_template.format(information=format_eval_results(data[i], reference_table)))
        except Exception as e:
            # NOTE: a skipped step leaves a gap in ops/obs, so later history
            # windows for this trajectory may be misaligned.
            print(f"Error in processing data {json_path} at step {i}: {e}")
            continue

        ops.append(op)

        # Conversation history up to (but not including) the current turn: the
        # first pair is (user query, first assistant turn); later pairs are
        # (previous observation, assistant turn).
        history = []
        for j in range(1, i):
            if j == 1:
                traj = [data[0], ops[j - 1]]
            else:
                traj = [obs[j - 2], ops[j - 1]]
            history.append(traj)

        # The current prompt is the user query for the first turn and the
        # preceding observation afterwards.
        instruction = data[0] if i == 1 else obs[i - 2]
        data_n = format_template(alpaca_template,
                                 instruction=instruction,
                                 input="",
                                 output=op,
                                 system=system_prompt)
        data_n["history"] = history

        data_list.append(data_n)
        global_counter += 1

    if return_data:
        return data_list

    print(f"Size of the sft dataset: {len(data_list)}")

    os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, "processed_data.json"), "w", encoding="utf-8") as out_f:
        json.dump(data_list, out_f, ensure_ascii=False, indent=4)
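
# Each emitted record looks like (contents abridged and illustrative):
#   {"instruction": "<information>...</information>",
#    "input": "",
#    "output": "<think>...</think><tool>...</tool>",
#    "system": "<concatenated system prompts>",
#    "history": [["<user query>", "<turn 1>"], ["<observation 1>", "<turn 2>"], ...]}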


def arg_parse():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Convert evaluation-agent trajectories into Alpaca-format training data")
    parser.add_argument("--home_dir", type=str, default="/home/data2/sltian/code/evaluation_agent_dev")
    parser.add_argument("--data_dir", type=str, default="ea-data")
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--format", type=str, choices=["alpaca", "glaive"], default="alpaca")
    return parser.parse_args()
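
# Typical invocation (script name illustrative):
#   python convert_to_alpaca.py --output_dir data/postprocess_20250101
# Omitting --output_dir writes to data/postprocess_<YYYYMMDD>.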


def main():
    """Convert every preprocessed trajectory and merge the results into one dataset."""
    args = arg_parse()

    if args.output_dir is None:
        args.output_dir = os.path.join("data", f"postprocess_{datetime.datetime.now().strftime('%Y%m%d')}")

    # Preprocessed trajectories are read from <home_dir>/data/preprocess; the
    # glob already restricts matches to .json files.
    preprocess_dir = os.path.join(args.home_dir, "data", "preprocess")
    chat_files = glob.glob(os.path.join(preprocess_dir, "*.json"))

    print(f"Found {len(chat_files)} chat history files to process")

    all_data = []
    for i, json_path in enumerate(chat_files):
        print(f"Processing file {i+1}/{len(chat_files)}: {os.path.basename(json_path)}")
        try:
            file_data = convert_to_alpaca(json_path, args.output_dir, return_data=True)
            all_data.extend(file_data)
        except Exception as e:
            print(f"Error processing {json_path}: {e}")
            continue

    print(f"\nTotal training examples created: {len(all_data)}")

    os.makedirs(args.output_dir, exist_ok=True)
    output_path = os.path.join(args.output_dir, "evaluation_agent_cot_dataset.json")
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_data, f, ensure_ascii=False, indent=2)

    print(f"Combined dataset saved to: {output_path}")


if __name__ == "__main__":
    main()