add evaluation code for Qwen-7B-Chat
parent 19474456d8
commit 1134e08be7
eval/evaluate_chat_gsm8k.py
@@ -0,0 +1,137 @@
import random
import tqdm
import os
import re
import sys
import torch
import numpy as np
import jsonlines
import argparse
import json
from pathlib import Path
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

'''
python eval/evaluate_chat_gsm8k.py [--use-fewshot]
'''

INVALID_ANS = "[invalid]"
DEVICE = "cuda:0"


def doc_to_text(doc, use_fewshot):
    if use_fewshot:
        context = "Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n" \
"Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n" \
"Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n" \
"Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n" \
"Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n" \
"When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n" \
"Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n" \
"For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n" \
f"Question: {doc['question']}\nLet's think step by step"
    else:
        context = doc['question']
    return context


def decode(tokens_list, tokenizer, raw_text_len):
    # Helper for decoding raw generated token ids back to text; not used in the
    # chat-based evaluation flow below, which relies on model.chat() instead.
    sents = []
    # print(len(tokens_list))
    for tokens in tokens_list:
        tokens = tokens.cpu().numpy().tolist()
        sent = tokenizer.tokenizer.decode(
            tokens[raw_text_len:])
        sent = sent.split('<|endoftext|>')[0]
        sent = sent.split('\n\n\n')[0]
        sent = sent.split("\n\n")[0]
        sent = sent.split("Question:")[0]
        sents.append(sent)
    return sents


def generate_sample(model, tokenizer, question):
    response, history = model.chat(
        tokenizer,
        question,
        history=None,
    )
    print(question)
    print("-------------")
    print(response)
    print("=============")
    return response


def extract_answer_hf(completion):
    def _get_last_digit(s):
        # Match numbers (optionally signed, with thousands separators and decimals)
        # and return the last one found in the string.
        _PAT_LAST_DIGIT = re.compile(r"(?<=(\s|[\$%#{]))([+-])?(?=(\S))(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?=(\s|[.,}]|$))")
        match = list(_PAT_LAST_DIGIT.finditer(s))
        if match:
            last_digit = match[-1].group().replace(",", "").replace("+", "")
            # print(f"The last digit in {s} is {last_digit}")
        else:
            last_digit = None
            print(f"No digits found in {s!r}")
        return last_digit

    job_gen = completion.strip('.').replace('\n', '\\n')
    last_digit = _get_last_digit(job_gen)
    if last_digit is not None:
        return eval(last_digit)
    else:
        return INVALID_ANS


def extract_answer(completion):
    try:
        last_number = re.findall(r'\d+', completion)[-1]
        return eval(last_number)
    except:
        return INVALID_ANS


def is_correct(completion, answer):
    gold = extract_answer(answer)
    assert gold != INVALID_ANS, "No ground truth answer found in the document."
    return extract_answer(completion) == gold
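
# Illustrative sketch (not part of the original script): how these answer helpers
# behave on hypothetical completion strings.
#
#   >>> extract_answer("They need 12 + 3 = 15 hours.\nThe answer is 4")
#   4
#   >>> is_correct("The answer is 4", "... #### 4")
#   True
#
# extract_answer simply evaluates the last run of digits in the text, so a trailing
# number after the final answer (e.g. a year or a unit) would change the result.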


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
    parser.add_argument("-c", "--checkpoint-path", type=Path, help="Checkpoint path", default="Qwen/Qwen-7B-Chat")
    parser.add_argument("-f", "--sample-input-file", type=str, default=None)
    parser.add_argument("-o", "--sample-output-file", type=str, default="gsm8k_res.jsonl")
    parser.add_argument("--use-fewshot", action="store_true")

    args = parser.parse_args()

    # Load GSM8K either from a local dataset directory or from the Hugging Face hub.
    if args.sample_input_file is not None:
        dataset = load_from_disk(args.sample_input_file)
    else:
        dataset = load_dataset("gsm8k", "main")

    print('Loading tokenizer ...')
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)

    print('Loading model ...')
    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True, bf16=True, use_flash_attn=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model.generation_config.do_sample = False  # use greedy decoding

    test = dataset["test"]

    f_output = open(args.sample_output_file, 'w', encoding='utf-8')
    tot_length = test.num_rows
    acc_res = []
    for doc in tqdm.tqdm(test):
        context = doc_to_text(doc, args.use_fewshot)
        print(context)
        completion = generate_sample(model, tokenizer, context)
        answer = doc["answer"]
        acc = is_correct(completion, answer)
        doc["completion"] = completion
        doc["acc"] = acc
        f_output.write(json.dumps(doc, ensure_ascii=False) + "\n")
        f_output.flush()
        acc_res.append(acc)

    f_output.close()
    print("4-shot Acc:" if args.use_fewshot else "Zero-shot Acc:", np.mean(acc_res))
eval/evaluate_chat_humaneval.py
@@ -0,0 +1,82 @@
import random
import tqdm
import os
import sys
import torch
import jsonlines
import argparse
from pathlib import Path
import re
import textwrap
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

"""
Get the HumanEval.jsonl file from [here](https://github.com/openai/human-eval/tree/master/data)

python eval/evaluate_chat_humaneval.py -f HumanEval.jsonl -o HumanEval_res.jsonl
git clone https://github.com/openai/human-eval
pip install -e human-eval
evaluate_functional_correctness HumanEval_res.jsonl
"""

DEVICE = "cuda:0"

def extract_code(text, entry_point):

    # Regex to match a fenced code block that contains the entry-point function
    code_block_pattern = re.compile(rf"```(?:[Pp]ython\n)?.*?def\s+{entry_point}.*?:\n(.*?)\n```", re.DOTALL)
    code_block = code_block_pattern.search(text)
    if code_block is None:
        # Fall back to an unfenced definition of the entry-point function
        code_block_pattern = re.compile(rf"def\s+{entry_point}.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL)
        code_block = code_block_pattern.search(text)
    if code_block is None:
        # Fall back to any function definition
        code_block_pattern = re.compile(rf"def.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL)
        code_block = code_block_pattern.search(text)

    if code_block is not None:
        return code_block.group(1)
    else:
        # if no code block is found, assume the LM is simply filling the code
        return textwrap.indent(text, ' ' * 4)
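
# Illustrative sketch (not part of the original script): what extract_code returns
# for a hypothetical chat response containing a fenced code block.
#
#   >>> extract_code("```python\ndef add(a, b):\n    return a + b\n```", "add")
#   '    return a + b'
#
# When neither a fenced block nor a matching def is found, the whole response is
# indented by four spaces and treated as the function body.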


def generate_sample(model, tokenizer, question, entry_point):
    response, history = model.chat(
        tokenizer,
        question,
        history=None,
    )
    print(question)
    print(response)
    answer = extract_code(response, entry_point)
    return answer, response


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
    parser.add_argument("-c", "--checkpoint-path", type=Path, help='Checkpoint path', default="Qwen/Qwen-7B-Chat")
    parser.add_argument("-f", "--sample-input-file", type=str, default=None, help="data path to HumanEval.jsonl")
    parser.add_argument("-o", "--sample-output-file", type=str, default="HumanEval_res.jsonl")

    args = parser.parse_args()
    print('Loading tokenizer ...')
    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)

    print('Loading model ...')
    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True, bf16=True, use_flash_attn=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model.generation_config.do_sample = False  # use greedy decoding

    f_output = jsonlines.Writer(open(args.sample_output_file, 'w', encoding='utf-8'))

    f = jsonlines.open(args.sample_input_file)
    with f_output as output:
        for jobj in tqdm.tqdm(f, desc='task_idx'):
            prompt = "Help me fill the following code.\n" + jobj['prompt']
            task_id = jobj['task_id']
            answer, response = generate_sample(model, tokenizer, prompt, jobj['entry_point'])
            gen_jobjs = {'task_id': task_id, "completion": answer, 'response': response}
            output.write(gen_jobjs)
    f_output.close()
eval/evaluate_chat_mmlu.py
@@ -0,0 +1,207 @@
import os
import pandas as pd
import numpy as np
import argparse
import datasets
import torch
import re
from thefuzz import process
from typing import List
from tqdm import tqdm
from transformers.trainer_utils import set_seed

'''
wget https://people.eecs.berkeley.edu/~hendrycks/data.tar
mkdir data/mmlu
mv data.tar data/mmlu
cd data/mmlu; tar xf data.tar
cd ../../

pip install thefuzz
python eval/evaluate_chat_mmlu.py -d data/mmlu/data/
'''

def load_models_tokenizer(args):
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation import GenerationConfig

    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True, bf16=True, use_flash_attn=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
    model.generation_config.do_sample = False  # use greedy decoding
    return model, tokenizer


def format_example(line):
    example = 'The following is a multiple-choice question. Please choose the most suitable one among A, B, C and D as the answer to this question.\n\n' + line['question'] + "\n"
    for choice in choices:
        example += f'{choice}. {line[choice]}\n'
    return example
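
# Illustrative sketch (not part of the original script): for a hypothetical row
# {'question': 'What is 2 + 2?', 'A': '3', 'B': '4', 'C': '5', 'D': '6'},
# format_example produces a prompt of the form:
#
#   The following is a multiple-choice question. Please choose the most suitable
#   one among A, B, C and D as the answer to this question.
#
#   What is 2 + 2?
#   A. 3
#   B. 4
#   C. 5
#   D. 6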


def process_before_extraction(gen, choice_dict):
    # Replace each option's text with its letter in the generated sentence,
    # matching from the longest option to the shortest one.
    for key, val in sorted(choice_dict.items(), key=lambda x: len(x[1]), reverse=True):
        pattern = re.compile(re.escape(val.rstrip(".")), re.IGNORECASE)
        gen = pattern.sub(key, gen)
    return gen


def extract_choice(gen, choice_list):
    # answer is A | choice is A | choose A
    res = re.search(r"(?:(?:[Cc]hoose)|(?:(?:[Aa]nswer|[Cc]hoice)(?![^ABCD]{0,20}?(?:n't|not))[^ABCD]{0,10}?\b(?:|is|:|be))\b)[^ABCD]{0,20}?\b(A|B|C|D)\b", gen)

    # A is correct | A is right
    if res is None:
        res = re.search(r"\b(A|B|C|D)\b(?![^ABCD]{0,8}?(?:n't|not)[^ABCD]{0,5}?(?:correct|right))[^ABCD]{0,10}?\b(?:correct|right)\b", gen)

    # straight answer: A
    if res is None:
        res = re.search(r"^(A|B|C|D)(?:\.|,|:|$)", gen)

    # simply extract the first appeared letter
    if res is None:
        res = re.search(r"(?<![a-zA-Z])(A|B|C|D)(?![a-zA-Z=])", gen)

    if res is None:
        return choices[choice_list.index(process.extractOne(gen, choice_list)[0])]
    else:
        return res.group(1)
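
# Illustrative sketch (not part of the original script): mapping a free-form reply
# to a choice letter. The reply and the options below are hypothetical.
#
#   >>> options = {"A": "Paris", "B": "London", "C": "Rome", "D": "Madrid"}
#   >>> process_before_extraction("The answer is Paris.", options)
#   'The answer is A.'
#   >>> extract_choice("The answer is A.", list(options.values()))
#   'A'
#
# If none of the letter patterns match, extract_choice falls back to fuzzy-matching
# the reply against the full option texts with thefuzz's process.extractOne.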


def extract_answer(response, row):
    gen = process_before_extraction(response, {choice: row[choice] for choice in choices})
    pred = extract_choice(gen, [row[choice] for choice in choices])
    return pred


@torch.no_grad()
def eval_subject(
        model,
        tokenizer,
        subject_name,
        test_df,
        save_result_dir=None,
        overwrite=False,
        **kwargs
):
    result_path = os.path.join(save_result_dir, f'{subject_name}_result.csv')
    if not overwrite and os.path.exists(result_path):
        print(f"{result_path} exists, skipping!")
        score = []
        for (_, datarow), (_, resultrow) in zip(test_df.iterrows(), pd.read_csv(result_path).iterrows()):
            # pred = extract_answer(resultrow['model_response'], datarow)
            pred = resultrow['model_output']
            correct = 1 if pred == datarow['answer'] else 0
            score.append(correct)
        return score

    result = []
    score = []
    responses = []

    for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
        question = format_example(row)

        response, history = model.chat(
            tokenizer,
            question,
            history=None,
        )
        print(question)
        print(response)
        pred = extract_answer(response, row)
        print(pred)
        print("======================")

        if 'answer' in row:
            correct = 1 if pred == row['answer'] else 0
            score.append(correct)
            if args.debug:
                print(f'{question} pred: {pred} ref: {row["answer"]}')
        result.append(pred)
        responses.append(response)

    if save_result_dir:
        test_df['model_output'] = result
        # keep the per-question chat responses alongside the extracted answers
        test_df['model_response'] = responses
        if score:
            test_df["correctness"] = score
        os.makedirs(save_result_dir, exist_ok=True)
        test_df.to_csv(result_path, encoding="utf-8", index=False)

    return score


def cal_mmlu(res):
    acc_sum_dict = dict()
    acc_norm_sum_dict = dict()
    cnt_dict = dict()
    acc_sum = 0.
    cnt = 0
    hard_cnt = 0
    hard_acc_sum = 0.

    for class_ in TASK_NAME_MAPPING.keys():
        acc_sum_dict[class_] = 0.
        acc_norm_sum_dict[class_] = 0.
        cnt_dict[class_] = 0.

        for tt in TASK_NAME_MAPPING[class_]:
            acc_sum += sum(res[tt])
            cnt += len(res[tt])

            acc_sum_dict[class_] += sum(res[tt])
            cnt_dict[class_] += len(res[tt])

    print('\n\n\n')
    for k in TASK_NAME_MAPPING.keys():
        if k in cnt_dict:
            print('%s ACC: %.2f ' % (
                k, acc_sum_dict[k] * 100 / cnt_dict[k]))
    print('AVERAGE ACC: %.2f' % (acc_sum * 100 / cnt))
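
# Illustrative sketch (not part of the original script): cal_mmlu expects a dict
# mapping every MMLU subject to its list of per-question 0/1 scores, e.g.
# (hypothetical) res = {'abstract_algebra': [1, 0, 1], 'anatomy': [1, 1], ...},
# and prints per-category accuracy plus the micro-averaged accuracy over all questions.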


def main(args):
    print("loading model weights")
    if args.checkpoint_path is not None:
        model, tokenizer = load_models_tokenizer(args)
    else:
        model, tokenizer = None, None
    print("model loaded")

    dev_result = {}
    for subject_name in tqdm(SUBJECTS):
        # val_file_path = os.path.join(args.eval_data_path, 'val', f'{subject_name}_val.csv')
        # dev_file_path = os.path.join(args.eval_data_path, 'dev', f'{subject_name}_dev.csv')
        test_file_path = os.path.join(args.eval_data_path, 'test', f'{subject_name}_test.csv')
        # val_df = pd.read_csv(val_file_path, names=['question','A','B','C','D','answer'])
        # dev_df = pd.read_csv(dev_file_path, names=['question','A','B','C','D','answer'])
        test_df = pd.read_csv(test_file_path, names=['question','A','B','C','D','answer'])

        score = eval_subject(model, tokenizer, subject_name, test_df, save_result_dir="outs_chat/mmlu_eval_result", overwrite=args.overwrite)
        dev_result[subject_name] = score
    cal_mmlu(dev_result)


TASK_NAME_MAPPING = {'stem': ['abstract_algebra', 'anatomy', 'astronomy', 'college_biology', 'college_chemistry', 'college_computer_science', 'college_mathematics', 'college_physics', 'computer_security', 'conceptual_physics', 'electrical_engineering', 'elementary_mathematics', 'high_school_biology', 'high_school_chemistry', 'high_school_computer_science', 'high_school_mathematics', 'high_school_physics', 'high_school_statistics', 'machine_learning'],
'Humanities': ['formal_logic', 'high_school_european_history', 'high_school_us_history', 'high_school_world_history', 'international_law', 'jurisprudence', 'logical_fallacies', 'moral_disputes', 'moral_scenarios', 'philosophy', 'prehistory', 'professional_law', 'world_religions'],
'other': ['business_ethics', 'college_medicine', 'human_aging', 'management', 'marketing', 'medical_genetics', 'miscellaneous', 'nutrition', 'professional_accounting', 'professional_medicine', 'virology', 'global_facts', 'clinical_knowledge'],
'social': ['econometrics', 'high_school_geography', 'high_school_government_and_politics', 'high_school_macroeconomics', 'high_school_microeconomics', 'high_school_psychology', 'human_sexuality', 'professional_psychology', 'public_relations', 'security_studies', 'sociology', 'us_foreign_policy']}
SUBJECTS = [v for vl in TASK_NAME_MAPPING.values() for v in vl]
choices = ["A", "B", "C", "D"]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
    parser.add_argument('-c', '--checkpoint-path', type=str, help='Checkpoint path', default="Qwen/Qwen-7B-Chat")
    parser.add_argument('-s', '--seed', type=int, default=1234, help='Random seed')

    # Provide extra arguments required for tasks.
    group = parser.add_argument_group(title='Evaluation options')
    group.add_argument('-d', '--eval_data_path', type=str,
                       help='Path to eval data')
    group.add_argument("--debug", action='store_true', default=False,
                       help='Print debug information.')
    group.add_argument("--overwrite", action='store_true', default=False,
                       help='Overwrite existing results')

    args = parser.parse_args()
    set_seed(args.seed)

    main(args)