import os
from typing import Any, Callable

import nest_asyncio
import numpy as np
import pandas as pd
from dotenv import load_dotenv

from evoagentx.benchmark import MolQA, MedPertQA
from evoagentx.optimizers import AFlowOptimizer
from evoagentx.models import OpenAILLMConfig, OpenAILLM

# Read the OpenAI key from the environment (e.g. a .env file) instead of
# hardcoding it in source.
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

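# Per-benchmark AFlow settings: each entry names the task's question type and
# the operator pool the optimizer may draw on when mutating workflow graphs.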
EXPERIMENTAL_CONFIG = {
    "humaneval": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "mbpp": {
        "question_type": "code",
        "operators": ["Custom", "CustomCodeGenerate", "Test", "ScEnsemble"]
    },
    "hotpotqa": {
        "question_type": "qa",
        "operators": ["Custom", "AnswerGenerate", "QAScEnsemble"]
    },
    "gsm8k": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    },
    "math": {
        "question_type": "math",
        "operators": ["Custom", "ScEnsemble", "Programmer"]
    }
}

# Allow re-entrant event loops: AFlow's async evaluation may be launched from
# an already-running loop (e.g. in a notebook).
nest_asyncio.apply()

# Per-example metric dicts collected during evaluation; written to CSV at exit.
outlist = []


class PubMedQASplits(MedPertQA):
    """MedPertQA variant that carves a small, deterministic split out of the
    dev set and records per-example metrics in the module-level ``outlist``."""

    def _load_data(self):
        super()._load_data()

        # Shuffle the dev set with a fixed seed and use the same 50 examples
        # as both the train and dev splits.
        np.random.seed(42)
        permutation = np.random.permutation(len(self._dev_data))
        full_test_data = self._dev_data

        self._train_data = [full_test_data[idx] for idx in permutation[:50]]
        self._dev_data = [full_test_data[idx] for idx in permutation[:50]]
        self._fulldata = full_test_data

    async def async_evaluate(self, graph: Callable, example: Any) -> float:
        # Run the candidate workflow graph on the formatted question, score the
        # answer against the gold label, and keep the full metric dict.
        prompt = example["question"]
        inputs = f"Question: {prompt}\nAnswer:"
        solution = await graph(inputs)
        label = self._get_label(example)
        metrics = await super().async_evaluate(prediction=solution, label=label)
        outlist.append(metrics)
        return metrics["acc"]
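
# Note: PubMedQASplits is not exercised by main() below, which runs MolQA
# instead. A minimal sketch of swapping it in, assuming the same optimizer
# wiring and hypothetical graph paths:
#
#   benchmark = PubMedQASplits()
#   optimizer = AFlowOptimizer(
#       graph_path="examples/aflow/pubmedqa",              # hypothetical path
#       optimized_path="examples/aflow/pubmedqa/optimized",
#       optimizer_llm=optimizer_llm,
#       executor_llm=executor_llm,
#       **EXPERIMENTAL_CONFIG["hotpotqa"],                 # generic QA operators
#   )
#   optimizer.optimize(benchmark)
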
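# Format an example as a {"problem": ...} prompt dict. Not called in this
# script; presumably kept for pipelines that expect a collate function.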
def collate_func(example: dict) -> dict:
    prompt = example["question"]
    problem = f"Question: {prompt}\n\nAnswer:"
    return {"problem": problem}


def main():
    # One model serves both roles: executing candidate workflows and proposing
    # graph optimizations.
    llm_config = OpenAILLMConfig(
        model="gpt-4o-mini-2024-07-18",
        openai_key=OPENAI_API_KEY,
        top_p=0.85,
        temperature=0.2,
        frequency_penalty=0.0,
        presence_penalty=0.0,
    )
    executor_llm = OpenAILLM(config=llm_config)
    optimizer_llm = OpenAILLM(config=llm_config)

    # Sample 50 MolQA training examples with a fixed seed and use them as both
    # the train and dev splits.
    benchmark = MolQA()
    np.random.seed(2024)
    indices = np.random.choice(len(benchmark._train_data), size=50, replace=False)
    subset = [benchmark._train_data[idx] for idx in indices]
    benchmark._train_data = subset
    benchmark._dev_data = subset

    # MolQA is a QA-style task, so it reuses the generic QA operator set
    # registered under the "hotpotqa" key.
    optimizer = AFlowOptimizer(
        graph_path="examples/aflow/molqa",
        optimized_path="examples/aflow/molqa/optimized_molqa",
        optimizer_llm=optimizer_llm,
        executor_llm=executor_llm,
        validation_rounds=3,
        eval_rounds=1,
        max_rounds=20,
        **EXPERIMENTAL_CONFIG["hotpotqa"]
    )

    # Search for an improved workflow graph, then evaluate the best round.
    optimizer.optimize(benchmark)
    optimizer.test(benchmark)


if __name__ == "__main__":
    main()

    # Persist the metrics collected by async_evaluate for later analysis.
    dfnew = pd.DataFrame(outlist)
    dfnew.to_csv("./molqa_save.csv")