import argparse
import json
import os

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# Command-line options for model selection and sampling.
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, help="Path to the model.")
parser.add_argument("--n", type=int, default=200, help="Number of samples to generate per prompt.")
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--repeat", type=int, default=None, help="Number of times to repeat the instruction generation. Only available when the total number of prompts is not specified.")
parser.add_argument("--max_tokens", type=int, default=2048)
parser.add_argument("--language", type=str, help="Target programming language.")
args = parser.parse_args()

# Use the last path component as a short model name for the output file.
name = os.path.basename(args.path)

tokenizer = AutoTokenizer.from_pretrained(args.path, trust_remote_code=True)

# Shard the model across 8 GPUs; enforce_eager disables CUDA graph capture.
llm = LLM(
    args.path,
    dtype="float16",
    tensor_parallel_size=8,
    max_model_len=5400,
    trust_remote_code=True,
    enforce_eager=True,
)

# Strings that terminate generation, covering several chat formats.
stop_tokens = ["<|end▁of▁sentence|>", "Assistant", "Assistant:", "[/INST]", "</s>"]
sampling_params = SamplingParams(
    n=args.n,
    temperature=args.temperature,
    top_p=args.top_p,
    max_tokens=args.max_tokens,
    stop=stop_tokens,
)

# Mistral-style chat template: a system prompt followed by an open [INST] turn
# that the model completes with a new coding instruction.
text = f"<s>[SYSTEM_PROMPT] You are an AI assistant designed to provide helpful, step-by-step guidance on {args.language} coding problems. The user will ask you a wide range of {args.language} coding questions. Your purpose is to assist users in understanding {args.language} coding concepts, working through {args.language} code, and arriving at the correct {args.language} solutions.[/SYSTEM_PROMPT][INST] "

# Output is written as JSON Lines (one object per line), despite the .json extension.
out_path = f"/home/aiscuser/fhw/data/{name}_t_{args.temperature}_p_{args.top_p}_{args.language}.json"
fw = open(out_path, "w+")

# Fall back to a single prompt if --repeat was not given ([text] * None would fail).
num_prompts = args.repeat if args.repeat is not None else 1
outputs = llm.generate(prompts=[text] * num_prompts, sampling_params=sampling_params)

for output in outputs:
    print(output.outputs[0].text)
    # Each prompt yields args.n sampled completions; write each one as its own JSON line.
    for completion in output.outputs:
        fw.write(json.dumps({"instruction": completion.text}) + "\n")
fw.close()
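
# Example invocation (the script filename and model path are illustrative
# placeholders, not taken from the original):
#   python sample_instructions.py --path /models/My-Model-7B \
#       --repeat 100 --temperature 1.0 --top_p 1.0 --language Python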