|
import argparse
import json

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
|
parser = argparse.ArgumentParser()
parser.add_argument('--judge', type=str, help='path to the judge model')
parser.add_argument('--start', type=int, help='start index of the input slice (inclusive)')
parser.add_argument('--end', type=int, help='end index of the input slice (exclusive)')
args = parser.parse_args()
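# Example invocation (hypothetical script name and paths -- adjust to your setup):
#   python classify_tasks.py --judge /path/to/judge-model --start 0 --end 1000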
|
# Load the judge model's tokenizer so its chat template can be applied below.
tokenizer = AutoTokenizer.from_pretrained(args.judge, trust_remote_code=True)

# Initialize vLLM with 8-way tensor parallelism; enforce_eager disables CUDA
# graph capture, trading some speed for lower memory usage.
llm = LLM(args.judge, dtype='float16', tensor_parallel_size=8,
          trust_remote_code=True, enforce_eager=True, max_model_len=5400)
sampling_params = SamplingParams(temperature=1.0, top_p=0.95, max_tokens=5400)
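# Note: tensor_parallel_size=8 assumes an 8-GPU node; on other hardware it
# should be set to the number of available GPUs (a deployment assumption,
# not a requirement of the script).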
|
# Read the requested slice of instructions; write one output shard per (start, end) range.
f = open("/home/aiscuser/fhw/data/all_instruct_with_answers.json", "r")
lines = f.readlines()[args.start:args.end]
f.close()
fw = open(f"/home/aiscuser/fhw/data/all_tasks_{args.start}_{args.end}.json", "w")
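# Each input line is assumed to be a JSON object with at least an "instruction"
# field, e.g. (hypothetical record):
#   {"instruction": "Write a function that reverses a string.", "answer": "..."}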
|
prompts = []
for line in tqdm(lines):
    d = json.loads(line)
    instruction = d["instruction"]
    prompt = (
        "Classify the following instruction into one of the predefined categories.\n"
        f"[Instruction]:\n{instruction}\n"
        "[Categories]:\n"
        "1. Code Generation: Generating source code based on certain specifications or requirements.\n"
        "2. Code Debugging: Identifying, diagnosing, and fixing errors or bugs in a code snippet.\n"
        "3. Code Optimization: Improving a program's performance, efficiency, or resource usage without changing its functionality.\n"
        "4. Code Reasoning: Predicting the output based on the given input, or predicting the input from the known output.\n"
        "5. Code Analysis: Analyzing, understanding, and explaining how a piece of code works.\n"
        "6. Theoretical Explanation: Answering questions about principles, theories, and properties of programming languages without involving real code snippets.\n"
        "7. Code Transpile: Converting source code from one programming language into another programming language."
    )
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    # Render the conversation with the model's chat template; add_generation_prompt
    # appends the assistant turn header so the model continues as the assistant.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    prompts.append(text)
|
# Run batched generation over all prompts, then attach each model response
# to its source record and write the results as JSON Lines.
outputs = llm.generate(prompts=prompts, sampling_params=sampling_params)
for line, output in zip(lines, outputs):
    d = json.loads(line)
    d["task"] = output.outputs[0].text
    fw.write(json.dumps(d) + "\n")
fw.close()
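# A minimal post-processing sketch (not part of the pipeline above): the raw
# judge output stored in d["task"] is free-form text, so a hypothetical helper
# like normalize_task() could map it onto the seven canonical category names.
CATEGORIES = [
    "Code Generation", "Code Debugging", "Code Optimization", "Code Reasoning",
    "Code Analysis", "Theoretical Explanation", "Code Transpile",
]

def normalize_task(text: str) -> str:
    """Return the first canonical category mentioned in the judge's output."""
    lowered = text.lower()
    for name in CATEGORIES:
        if name.lower() in lowered:
            return name
    return "Unknown"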