alexmarques committed
Commit e59ac87 · verified · 1 Parent(s): 9a48f80

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED

@@ -41,6 +41,7 @@ from vllm import LLM, SamplingParams
 from transformers import AutoTokenizer
 
 model_id = "neuralmagic/Meta-Llama-3-8B-Instruct-quantized.w8a16"
+number_gpus = 1
 
 sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
 
@@ -53,7 +54,7 @@ messages = [
 
 prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
-llm = LLM(model=model_id, tensor_parallel_size=1)
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
 
 outputs = llm.generate(prompts, sampling_params)
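For reference, below is a consolidated sketch of how the updated README snippet fits together after this change. The tokenizer setup and the contents of the messages list are not part of the diff context, so the AutoTokenizer.from_pretrained call and the single placeholder user message here are assumptions for illustration, not the README's exact content.

```python
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

model_id = "neuralmagic/Meta-Llama-3-8B-Instruct-quantized.w8a16"
number_gpus = 1  # added by this commit; passed to tensor_parallel_size below

sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)

# Assumed setup (not shown in the diff context): load the tokenizer and define a chat.
tokenizer = AutoTokenizer.from_pretrained(model_id)
messages = [
    {"role": "user", "content": "Who are you?"},  # placeholder prompt
]

prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

llm = LLM(model=model_id, tensor_parallel_size=number_gpus)  # line updated by this commit

outputs = llm.generate(prompts, sampling_params)
print(outputs[0].outputs[0].text)
```

Factoring the GPU count into a `number_gpus` variable gives readers a single, named place to change when running the example across more than one GPU, rather than editing the `LLM(...)` call directly.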