diff --git a/eval/evaluate_chat_ceval.py b/eval/evaluate_chat_ceval.py
new file mode 100644
index 0000000..10d5b27
--- /dev/null
+++ b/eval/evaluate_chat_ceval.py
@@ -0,0 +1,290 @@
+import os
+import pandas as pd
+import numpy as np
+import argparse
+import datasets
+import torch
+import re
+from thefuzz import process
+from typing import List
+from tqdm import tqdm
+from transformers.trainer_utils import set_seed
+
+'''
+wget https://huggingface.co/datasets/ceval/ceval-exam/resolve/main/ceval-exam.zip
+mkdir data/ceval
+mv ceval-exam.zip data/ceval
+cd data/ceval; unzip ceval-exam.zip
+cd ../../
+
+pip install thefuzz
+python eval/evaluate_chat_ceval.py -d data/ceval
+'''
+
+def load_models_tokenizer(args):
+    from transformers import AutoModelForCausalLM, AutoTokenizer
+    from transformers.generation import GenerationConfig
+
+    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
+    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True, bf16=True, use_flash_attn=True).eval()
+    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
+    model.generation_config.do_sample = False  # use greedy decoding
+    return model, tokenizer
+
+def process_before_extraction(gen, question, choice_dict):
+    # Example Prompt:
+    # 关于传输层的面向连接服务的特性是____。
+    # A. 既不保证可靠,也不保证按序交付
+    # B. 不保证可靠,但保证按序交付
+    # C. 保证可靠,但不保证按序交付
+    # D. 既保证可靠,也保证按序交付
+    # Example Model Output:
+    # 关于传输层的面向连接服务的特性是既保证可靠,也保证按序交付
+    # Processed Output:
+    # 答案是D
+
+    question_split = question.rstrip("。").split("。")[-1].split("_")
+
+    # replacing the question
+    if len(question_split[0].strip()) > 4:
+        gen = gen.replace(question_split[0], "答案是")
+    if len(question_split[-1].strip()) > 4:
+        gen = gen.replace(question_split[-1], "")
+
+    # replace the choice by letter in the generated sentence
+    # from longest one to shortest one
+    for key, val in sorted(choice_dict.items(), key=lambda x: len(x[1]), reverse=True):
+        gen = gen.replace(val.rstrip("。"), key)
+    return gen
+
+def count_substr(gen, pattern):
+    return len(re.findall(pattern, gen))
+
+def extract_choice(gen, prompt, choice_list):
+    # 答案是A | 选项是A | 应该选A选项
+    res = re.search(r"(?:(?:选|选择|选定)|(?:(?:答案|选项)(?![^ABCD]{0,10}?(?:不|非)[^ABCD]{0,10}?(?:是|为|:|:|】))[^ABCD]{0,10}?(?:是|为|:|:|】))[^ABCD]{0,10}?)(A|B|C|D)(?:选项)?(?:\)|。|\.|,|,|.|、|A|B|C|D|$)", gen)
+
+    # A选项正确 | A选项符合题意
+    if res is None:
+        res = re.search(r"(A|B|C|D)(?:选?项)?(?![^ABCD]{0,4}?(?:不|非)[^ABCD]{0,4}?(?:正确|对|符合))[^ABCD]{0,4}?(?:正确|对|符合)", gen)
+
+    # straight answer: A
+    if res is None:
+        res = re.search(r"^(A|B|C|D)(?:。|\.|,|,|.|$)", gen)
+
+    # extract the first letter that appears
+    if res is None:
+        res = re.search(r"(?
+    if hard_cnt > 0:
+        print('Hard acc:%.2f ' % (hard_acc_sum / hard_cnt))
+    print('AVERAGE acc:%.2f ' % (acc_sum / cnt))
+
+
+TASK_NAME_MAPPING = {
+    "computer_network": ["Computer Network", "\u8ba1\u7b97\u673a\u7f51\u7edc", "STEM"],
+    "operating_system": ["Operating System", "\u64cd\u4f5c\u7cfb\u7edf", "STEM"],
+    "computer_architecture": ["Computer Architecture", "\u8ba1\u7b97\u673a\u7ec4\u6210", "STEM"],
+    "college_programming": ["College Programming", "\u5927\u5b66\u7f16\u7a0b", "STEM"],
+    "college_physics": ["College Physics", "\u5927\u5b66\u7269\u7406", "STEM"],
+    "college_chemistry": ["College Chemistry", "\u5927\u5b66\u5316\u5b66", "STEM"],
+    "advanced_mathematics": ["Advanced Mathematics", "\u9ad8\u7b49\u6570\u5b66", "STEM"],
+    "probability_and_statistics": ["Probability and Statistics", "\u6982\u7387\u7edf\u8ba1", "STEM"],
+    "discrete_mathematics": ["Discrete Mathematics", "\u79bb\u6563\u6570\u5b66", "STEM"],
+    "electrical_engineer": ["Electrical Engineer", "\u6ce8\u518c\u7535\u6c14\u5de5\u7a0b\u5e08", "STEM"],
+    "metrology_engineer": ["Metrology Engineer", "\u6ce8\u518c\u8ba1\u91cf\u5e08", "STEM"],
+    "high_school_mathematics": ["High School Mathematics", "\u9ad8\u4e2d\u6570\u5b66", "STEM"],
+    "high_school_physics": ["High School Physics", "\u9ad8\u4e2d\u7269\u7406", "STEM"],
+    "high_school_chemistry": ["High School Chemistry", "\u9ad8\u4e2d\u5316\u5b66", "STEM"],
+    "high_school_biology": ["High School Biology", "\u9ad8\u4e2d\u751f\u7269", "STEM"],
+    "middle_school_mathematics": ["Middle School Mathematics", "\u521d\u4e2d\u6570\u5b66", "STEM"],
+    "middle_school_biology": ["Middle School Biology", "\u521d\u4e2d\u751f\u7269", "STEM"],
+    "middle_school_physics": ["Middle School Physics", "\u521d\u4e2d\u7269\u7406", "STEM"],
+    "middle_school_chemistry": ["Middle School Chemistry", "\u521d\u4e2d\u5316\u5b66", "STEM"],
+    "veterinary_medicine": ["Veterinary Medicine", "\u517d\u533b\u5b66", "STEM"],
+    "college_economics": ["College Economics", "\u5927\u5b66\u7ecf\u6d4e\u5b66", "Social Science"],
+    "business_administration": ["Business Administration", "\u5de5\u5546\u7ba1\u7406", "Social Science"],
+    "marxism": ["Marxism", "\u9a6c\u514b\u601d\u4e3b\u4e49\u57fa\u672c\u539f\u7406", "Social Science"],
+    "mao_zedong_thought": ["Mao Zedong Thought", "\u6bdb\u6cfd\u4e1c\u601d\u60f3\u548c\u4e2d\u56fd\u7279\u8272\u793e\u4f1a\u4e3b\u4e49\u7406\u8bba\u4f53\u7cfb\u6982\u8bba", "Social Science"],
+    "education_science": ["Education Science", "\u6559\u80b2\u5b66", "Social Science"],
+    "teacher_qualification": ["Teacher Qualification", "\u6559\u5e08\u8d44\u683c", "Social Science"],
+    "high_school_politics": ["High School Politics", "\u9ad8\u4e2d\u653f\u6cbb", "Social Science"],
+    "high_school_geography": ["High School Geography", "\u9ad8\u4e2d\u5730\u7406", "Social Science"],
+    "middle_school_politics": ["Middle School Politics", "\u521d\u4e2d\u653f\u6cbb", "Social Science"],
+    "middle_school_geography": ["Middle School Geography", "\u521d\u4e2d\u5730\u7406", "Social Science"],
+    "modern_chinese_history": ["Modern Chinese History", "\u8fd1\u4ee3\u53f2\u7eb2\u8981", "Humanities"],
+    "ideological_and_moral_cultivation": ["Ideological and Moral Cultivation", "\u601d\u60f3\u9053\u5fb7\u4fee\u517b\u4e0e\u6cd5\u5f8b\u57fa\u7840", "Humanities"],
+    "logic": ["Logic", "\u903b\u8f91\u5b66", "Humanities"],
+    "law": ["Law", "\u6cd5\u5b66", "Humanities"],
+    "chinese_language_and_literature": ["Chinese Language and Literature", "\u4e2d\u56fd\u8bed\u8a00\u6587\u5b66", "Humanities"],
+    "art_studies": ["Art Studies", "\u827a\u672f\u5b66", "Humanities"],
+    "professional_tour_guide": ["Professional Tour Guide", "\u5bfc\u6e38\u8d44\u683c", "Humanities"],
+    "legal_professional": ["Legal Professional", "\u6cd5\u5f8b\u804c\u4e1a\u8d44\u683c", "Humanities"],
+    "high_school_chinese": ["High School Chinese", "\u9ad8\u4e2d\u8bed\u6587", "Humanities"],
+    "high_school_history": ["High School History", "\u9ad8\u4e2d\u5386\u53f2", "Humanities"],
+    "middle_school_history": ["Middle School History", "\u521d\u4e2d\u5386\u53f2", "Humanities"],
+    "civil_servant": ["Civil Servant", "\u516c\u52a1\u5458", "Other"],
+    "sports_science": ["Sports Science", "\u4f53\u80b2\u5b66", "Other"],
+    "plant_protection": ["Plant Protection", "\u690d\u7269\u4fdd\u62a4", "Other"],
+    "basic_medicine": ["Basic Medicine", "\u57fa\u7840\u533b\u5b66", "Other"],
+    "clinical_medicine": ["Clinical Medicine", "\u4e34\u5e8a\u533b\u5b66", "Other"],
+    "urban_and_rural_planner": ["Urban and Rural Planner", "\u6ce8\u518c\u57ce\u4e61\u89c4\u5212\u5e08", "Other"],
+    "accountant": ["Accountant", "\u6ce8\u518c\u4f1a\u8ba1\u5e08", "Other"],
+    "fire_engineer": ["Fire Engineer", "\u6ce8\u518c\u6d88\u9632\u5de5\u7a0b\u5e08", "Other"],
+    "environmental_impact_assessment_engineer": ["Environmental Impact Assessment Engineer", "\u73af\u5883\u5f71\u54cd\u8bc4\u4ef7\u5de5\u7a0b\u5e08", "Other"],
+    "tax_accountant": ["Tax Accountant", "\u7a0e\u52a1\u5e08", "Other"],
+    "physician": ["Physician", "\u533b\u5e08\u8d44\u683c", "Other"]
+}
+hard_list = ['advanced_mathematics', 'discrete_mathematics', 'probability_and_statistics', 'college_physics', 'college_chemistry', 'high_school_mathematics', 'high_school_physics', 'high_school_chemistry']
+choices = ["A", "B", "C", "D"]
+
+
+def main(args):
+    print("loading model weights")
+    if args.checkpoint_path:
+        model, tokenizer = load_models_tokenizer(args)
+    else:
+        model, tokenizer = None, None
+    print("model loaded")
+    dev_result = {}
+    for subject_name in tqdm(TASK_NAME_MAPPING.keys()):
+        val_file_path = os.path.join(args.eval_data_path, 'val', f'{subject_name}_val.csv')
+        # dev_file_path = os.path.join(args.eval_data_path, 'dev', f'{subject_name}_dev.csv')
+        # test_file_path = os.path.join(args.eval_data_path, 'test', f'{subject_name}_test.csv')
+        val_df = pd.read_csv(val_file_path)
+        # dev_df = pd.read_csv(dev_file_path)
+        # test_df = pd.read_csv(test_file_path)
+
+        score = eval_subject(model, tokenizer, subject_name, val_df,
+                             save_result_dir=f"outs_chat/ceval_eval_result", overwrite=args.overwrite)
+        dev_result[subject_name] = score
+    cal_ceval(dev_result)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
+    parser.add_argument('-c', '--checkpoint-path', type=str, help='Checkpoint path', default="Qwen/Qwen-7B-Chat")
+    parser.add_argument('-s', '--seed', type=int, default=1234, help='Random seed')
+
+    """Provide extra arguments required for tasks."""
+    group = parser.add_argument_group(title='Evaluation options')
+    group.add_argument('-d', '--eval_data_path', type=str, required=True,
+                       help='Path to eval data')
+    group.add_argument("--debug", action='store_true', default=False,
+                       help='Print debug info.')
+    group.add_argument("--overwrite", action='store_true', default=False,
+                       help='Overwrite existing results')
+
+    args = parser.parse_args()
+    set_seed(args.seed)
+
+    main(args)
\ No newline at end of file
diff --git a/eval/evaluate_chat_gsm8k.py b/eval/evaluate_chat_gsm8k.py
new file mode 100644
index 0000000..1358264
--- /dev/null
+++ b/eval/evaluate_chat_gsm8k.py
@@ -0,0 +1,137 @@
+import random
+import tqdm
+import os
+import re
+import sys
+import torch
+import numpy as np
+import jsonlines
+import argparse
+import json
+from pathlib import Path
+from datasets import load_from_disk,load_dataset
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers.generation import GenerationConfig
+
+'''
+python eval/evaluate_chat_gsm8k.py [--use-fewshot]
+'''
+
+INVALID_ANS = "[invalid]"
+DEVICE = "cuda:0"
+
+def doc_to_text(doc, use_fewshot):
+    if use_fewshot:
+        context = "Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n" \
+                  "Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n" \
+                  "Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n" \
+                  "Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n" \
+                  "Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n" \
+                  "When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n" \
+                  "Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n" \
+                  "For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n" \
+                  f"Question: {doc['question']}\nLet's think step by step"
+    else:
+        context = doc['question']
+    return context
+
+def decode(tokens_list, tokenizer, raw_text_len):
+    sents = []
+    # print(len(tokens_list))
+    for tokens in tokens_list:
+        tokens = tokens.cpu().numpy().tolist()
+        sent = tokenizer.tokenizer.decode(
+            tokens[raw_text_len:])
+        sent = sent.split('<|endoftext|>')[0]
+        sent = sent.split('\n\n\n')[0]
+        sent = sent.split("\n\n")[0]
+        sent = sent.split("Question:")[0]
+        sents.append(sent)
+    return sents
+
+def generate_sample(model, tokenizer, question):
+    response, history = model.chat(
+        tokenizer,
+        question,
+        history=None,
+    )
+    print(question)
+    print("-------------")
+    print(response)
+    print("=============")
+    return response
+
+
+def extract_answer_hf(completion):
+    def _get_last_digit(s):
+        _PAT_LAST_DIGIT = re.compile(r"(?<=(\s|[\$%#{]))([+-])?(?=(\S))(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?=(\s|[.,}]|$))")
+        match = list(_PAT_LAST_DIGIT.finditer(s))
+        if match:
+            last_digit = match[-1].group().replace(",", "").replace("+", "")
+            # print(f"The last digit in {s} is {last_digit}")
+        else:
+            last_digit = None
+            print(f"No digits found in {s!r}")
+        return last_digit
+
+    job_gen = completion.strip('.').replace('\n', '\\n')
+    last_digit = _get_last_digit(job_gen)
+    if last_digit is not None:
+        return eval(last_digit)
+    else:
+        return INVALID_ANS
+
+def extract_answer(completion):
+    try:
+        last_number = re.findall(r'\d+', completion)[-1]
+        return eval(last_number)
+    except:
+        return INVALID_ANS
+
+def is_correct(completion, answer):
+    gold = extract_answer(answer)
+    assert gold != INVALID_ANS, "No ground truth answer found in the document."
+    return extract_answer(completion) == gold
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
+    parser.add_argument("-c", "--checkpoint-path", type=Path, help="Checkpoint path", default="Qwen/Qwen-7B-Chat")
+    parser.add_argument("-f","--sample-input-file", type=str, default=None)
+    parser.add_argument("-o","--sample-output-file", type=str, default="gsm8k_res.jsonl")
+    parser.add_argument("--use-fewshot", action="store_true")
+
+    args = parser.parse_args()
+
+    if args.sample_input_file is not None:
+        dataset = load_from_disk(args.sample_input_file)
+    else:
+        dataset = load_dataset("gsm8k", "main")
+
+    print('Loading tokenizer ...')
+    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True, bf16=True, use_flash_attn=True)
+
+    print('Loading model ...')
+    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True).eval()
+    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
+    model.generation_config.do_sample = False  # use greedy decoding
+
+    test = dataset["test"]
+
+    f_output = open(args.sample_output_file, 'w', encoding='utf-8')
+    tot_length = test.num_rows
+    acc_res = []
+    for doc in tqdm.tqdm(test):
+        context = doc_to_text(doc, args.use_fewshot)
+        print(context)
+        completion = generate_sample(model, tokenizer, context)
+        answer = doc["answer"]
+        acc = is_correct(completion, answer)
+        doc["completion"] = completion
+        doc["acc"] = acc
+        f_output.write(json.dumps(doc, ensure_ascii=False) + "\n")
+        f_output.flush()
+        acc_res.append(acc)
+
+    f_output.close()
+    print("4-shot Acc: " if args.use_fewshot else "Zero-shot Acc", np.mean(acc_res))
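Scoring in the GSM8K script is last-number matching: `extract_answer` keeps the final run of digits in both the model completion and the GSM8K gold answer (whose reference solutions end in `#### <number>`) and compares them; anything without a digit becomes `INVALID_ANS`. A minimal standalone sketch of that rule follows (the sample strings are made up, and `int` stands in for the script's `eval` on the matched digits):

```python
import re

INVALID_ANS = "[invalid]"

def last_number(text):
    # same idea as extract_answer: keep only the final run of digits
    matches = re.findall(r"\d+", text)
    return int(matches[-1]) if matches else INVALID_ANS

completion = "So they need 12 hours plus 3 hours of breaks.\nThe answer is 4"
gold = "12 + 3 = 15 hours, 15 / 4 = 3.75, so 4 days\n#### 4"  # GSM8K-style reference answer

print(last_number(completion), last_number(gold))     # 4 4
print(last_number(completion) == last_number(gold))   # True -> counted as correct
```

Note that `\d+` ignores signs, commas and decimal points; the stricter `extract_answer_hf` helper above handles those cases but is not the one called by `is_correct`.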
diff --git a/eval/evaluate_chat_humaneval.py b/eval/evaluate_chat_humaneval.py
new file mode 100644
index 0000000..c80c195
--- /dev/null
+++ b/eval/evaluate_chat_humaneval.py
@@ -0,0 +1,82 @@
+import random
+import tqdm
+import os
+import sys
+import torch
+import jsonlines
+import argparse
+import jsonlines
+from pathlib import Path
+import re
+import textwrap
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers.generation import GenerationConfig
+
+"""
+Get the HumanEval.jsonl file from [here](https://github.com/openai/human-eval/tree/master/data)
+
+python eval/evaluate_chat_humaneval.py -f HumanEval.jsonl -o HumanEval_res.jsonl
+git clone https://github.com/openai/human-eval
+pip install -e human-eval
+evaluate_functional_correctness HumanEval_res.jsonl
+"""
+
+DEVICE = "cuda:0"
+
+def extract_code(text, entry_point):
+
+    # regex to match a fenced code block containing the target function
+    code_block_pattern = re.compile(rf"```(?:[Pp]ython\n)?.*?def\s+{entry_point}.*?:\n(.*?)\n```", re.DOTALL)
+    code_block = code_block_pattern.search(text)
+    if code_block is None:
+        code_block_pattern = re.compile(rf"def\s+{entry_point}.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL)
+        code_block = code_block_pattern.search(text)
+    if code_block is None:
+        code_block_pattern = re.compile(rf"def.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL)
+        code_block = code_block_pattern.search(text)
+
+    if code_block is not None:
+        return code_block.group(1)
+    else:
+        # if no code block is found, assume the LM is simply filling the code
+        return textwrap.indent(text, ' ' * 4)
+
+def generate_sample(model, tokenizer, question, entry_point):
+    response, history = model.chat(
+        tokenizer,
+        question,
+        history=None,
+    )
+    print(question)
+    print(response)
+    answer = extract_code(response, entry_point)
+    return answer, response
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser(description='Test HF checkpoint.')
+    parser.add_argument("-c", "--checkpoint-path", type=Path, help='Checkpoint path', default="Qwen/Qwen-7B-Chat")
+    parser.add_argument("-f","--sample-input-file", type=str, default=None, help="data path to HumanEval.jsonl")
+    parser.add_argument("-o","--sample-output-file", type=str, default="HumanEval_res.jsonl")
+
+
+    args = parser.parse_args()
+    print('Loading tokenizer ...')
+    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint_path, trust_remote_code=True)
+
+    print('Loading model ...')
+    model = AutoModelForCausalLM.from_pretrained(args.checkpoint_path, device_map="auto", trust_remote_code=True, bf16=True, use_flash_attn=True).eval()
+    model.generation_config = GenerationConfig.from_pretrained(args.checkpoint_path, trust_remote_code=True)
+    model.generation_config.do_sample = False  # use greedy decoding
+
+    f_output = jsonlines.Writer(open(args.sample_output_file, 'w', encoding='utf-8'))
+
+    f = jsonlines.open(args.sample_input_file)
+    with f_output as output:
+        for jobj in tqdm.tqdm(f, desc='task_idx'):
+            prompt = "Help me fill the following code.\n" + jobj['prompt']
+            task_id = jobj['task_id']
+            answer, response = generate_sample(model, tokenizer, prompt, jobj['entry_point'])
+            gen_jobjs = {'task_id': task_id, "completion": answer, 'response': response}
+            output.write(gen_jobjs)
+    f_output.close()
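In the HumanEval script, `extract_code` tries three patterns in order: a fenced code block that defines the target `entry_point`, a bare `def` of that function, then any `def`; if none match, it assumes the model answered with the body only and indents the whole response. Below is a quick standalone check of the first pattern on a made-up chat reply (the entry point and response text are illustrative):

```python
import re
import textwrap

entry_point = "add"  # hypothetical entry point, for illustration only
fence = "`" * 3      # backtick fence, built here to keep this snippet display-safe
response = (
    "Sure, here you go:\n"
    f"{fence}python\n"
    "def add(a, b):\n"
    "    return a + b\n"
    f"{fence}"
)

pattern = re.compile(rf"{fence}(?:[Pp]ython\n)?.*?def\s+{entry_point}.*?:\n(.*?)\n{fence}", re.DOTALL)
match = pattern.search(response)
completion = match.group(1) if match else textwrap.indent(response, " " * 4)
print(repr(completion))  # '    return a + b'
```

Only the indented body is written to the `completion` field, so that `evaluate_functional_correctness` can concatenate it with the original HumanEval prompt and run the unit tests.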
{line[f"{choice}"]}\n' + return example + + +def process_before_extraction(gen, choice_dict): + # replace the choice by letter in the generated sentence + # from longest one to shortest one + for key, val in sorted(choice_dict.items(), key=lambda x: len(x[1]), reverse=True): + pattern = re.compile(re.escape(val.rstrip(".")), re.IGNORECASE) + gen = pattern.sub(key, gen) + return gen + +def extract_choice(gen, choice_list): + # answer is A | choice is A | choose A + res = re.search(r"(?:(?:[Cc]hoose)|(?:(?:[Aa]nswer|[Cc]hoice)(?![^ABCD]{0,20}?(?:n't|not))[^ABCD]{0,10}?\b(?:|is|:|be))\b)[^ABCD]{0,20}?\b(A|B|C|D)\b", gen) + + # A is correct | A is right + if res is None: + res = re.search(r"\b(A|B|C|D)\b(?![^ABCD]{0,8}?(?:n't|not)[^ABCD]{0,5}?(?:correct|right))[^ABCD]{0,10}?\b(?:correct|right)\b", gen) + + # straight answer: A + if res is None: + res = re.search(r"^(A|B|C|D)(?:\.|,|:|$)", gen) + + # simply extract the first appearred letter + if res is None: + res = re.search(r"(?