Instructions to use deepcode-ai/Prompt-Injection-LLM01 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Adapters
How to use deepcode-ai/Prompt-Injection-LLM01 with Adapters:
    from adapters import AutoAdapterModel
    model = AutoAdapterModel.from_pretrained("undefined")
    model.load_adapter("deepcode-ai/Prompt-Injection-LLM01", set_active=True)
- Notebooks
- Google Colab
- Kaggle
File size: 1,132 Bytes
import os
import pickle
from tqdm import tqdm
from typing import List
import pandas as pd
from prompt_injection.evaluators.base import PromptEvaluator
def init_evaluator_result_object(output_path, evaluator_list):
    """Load (or create) the incremental result dict for a set of evaluators.

    Parameters
    ----------
    output_path : str
        Path to a pickle checkpoint from a previous run; if the file exists,
        its contents seed the result object so evaluation can resume.
    evaluator_list : list
        Objects exposing ``get_name()``; each gets its own result column.

    Returns
    -------
    dict
        Mapping with ``'idx'`` and ``'Prompt'`` lists plus one list per
        evaluator name (existing checkpoint data for a name is kept).
    """
    result = {'idx': [], 'Prompt': []}
    # Bugfix: the original loaded the checkpoint twice via a duplicated
    # os.path.exists/pickle.load stanza; one load suffices.
    if os.path.exists(output_path):
        # NOTE(security): pickle.load can execute arbitrary code if the
        # checkpoint file is attacker-controlled; only resume from trusted paths.
        with open(output_path, 'rb') as f:
            result = pickle.load(f)
    # Ensure every evaluator has a column, preserving any resumed data.
    for evaluator in evaluator_list:
        result[evaluator.get_name()] = result.get(evaluator.get_name(), [])
    return result
def evaluate_all(prompts, evaluator_list: List[PromptEvaluator], output_path):
    """Run every evaluator over every prompt, checkpointing after each prompt.

    Resumes from ``output_path`` if it exists: prompt indices already recorded
    in the checkpoint are skipped, so an interrupted run can be restarted.

    Parameters
    ----------
    prompts : sequence of str
        Prompts to evaluate; position is used as the stable index.
    evaluator_list : List[PromptEvaluator]
        Evaluators; each contributes one column named by ``get_name()``.
    output_path : str
        Pickle checkpoint path, rewritten after every evaluated prompt.

    Returns
    -------
    pandas.DataFrame
        One row per evaluated prompt: ``'idx'``, ``'Prompt'``, and one
        column per evaluator.
    """
    result = init_evaluator_result_object(output_path, evaluator_list)
    # Perf fix: `i in result["idx"]` scans a list on every prompt, making a
    # resumed run quadratic. A set built once gives O(1) membership tests.
    done = set(result['idx'])
    for i, prompt in enumerate(tqdm(prompts)):
        if i in done:
            continue
        result['idx'].append(i)
        result['Prompt'].append(prompt)
        for evaluator in evaluator_list:
            result[evaluator.get_name()].append(evaluator.eval_sample(prompt))
        # Checkpoint after each prompt so progress survives interruption.
        with open(output_path, 'wb') as f:
            pickle.dump(result, f, protocol=pickle.HIGHEST_PROTOCOL)
    return pd.DataFrame.from_dict(result)