---
license: apache-2.0
datasets:
- songff/UltraPrompt
language:
- en
base_model:
- meta-llama/Llama-3.2-3B-Instruct
library_name: transformers
---
# P-Aligner
## Quick Start
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

# Example: rewrite a raw user instruction into an improved one with P-Aligner.
raw_instruction = "What is the capital of France?"

# Path (or repo id) of the P-Aligner checkpoint.
model_path = "P-Aligner"

# trust_remote_code=True loads the repo's custom tokenizer code, which
# provides the parse_output() helper used below.
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Load the model with vLLM for fast batched inference.
model = LLM(
    model=model_path,
    gpu_memory_utilization=0.9,  # fraction of GPU memory vLLM may reserve
    enable_prefix_caching=True,  # reuse KV cache for shared prompt prefixes
    dtype="bfloat16",
)

# Greedy decoding (temperature=0.0) for a deterministic rewrite.
outputs = model.generate(
    [raw_instruction],
    sampling_params=SamplingParams(
        temperature=0.0,
        max_tokens=2048,
    ),
)

# parse_output() is defined by the model repo's custom tokenizer code;
# it extracts the improved instruction from the raw generation text.
# NOTE(review): exact parsing behavior is defined in the remote code —
# confirm against the model repository if it matters downstream.
better_instruction = tokenizer.parse_output(
    outputs[0].outputs[0].text,
    raw_instruction,
)
print(better_instruction)