Grooty889 committed
Commit 3fc441b · verified · 1 Parent(s): 7be27ba

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -4,7 +4,7 @@ from fastapi import FastAPI
 from pydantic import BaseModel
 
 # Load the model and tokenizer
-model_name = "google/gemma-2b"
+model_name = "databricks/dolly-v2-3b"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
 
@@ -18,7 +18,7 @@ class ChatInput(BaseModel):
 @app.post("/chat")
 async def chat(chat_input: ChatInput):
     inputs = tokenizer(chat_input.user_input, return_tensors="pt").to("cuda")
-    outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True)
+    outputs = model.generate(**inputs, max_length=200, do_sample=True)
     response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return {"response": response_text}
 
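
For context, a sketch of what the full app.py might look like after this commit. The diff only shows lines 4-10 and 18-24, so the surrounding pieces (the torch/transformers imports, the app = FastAPI() instantiation, and the body of the ChatInput model) are assumptions reconstructed from the visible context lines, not the author's confirmed code.

# Assumed reconstruction of app.py after commit 3fc441b.
# Only the lines visible in the diff are confirmed; the imports,
# app instantiation, and ChatInput body are educated guesses.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from fastapi import FastAPI
from pydantic import BaseModel

# Load the model and tokenizer
model_name = "databricks/dolly-v2-3b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")

app = FastAPI()  # assumed: required for the @app.post decorator below

class ChatInput(BaseModel):
    user_input: str  # assumed field; the handler reads chat_input.user_input

@app.post("/chat")
async def chat(chat_input: ChatInput):
    inputs = tokenizer(chat_input.user_input, return_tensors="pt").to("cuda")
    # Note on the second change in this commit: max_length caps prompt plus
    # generated tokens combined, whereas the previous max_new_tokens=200
    # capped only newly generated tokens, so long prompts now leave less
    # room for the reply.
    outputs = model.generate(**inputs, max_length=200, do_sample=True)
    response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"response": response_text}

Assuming the app is served with uvicorn on the default port, a request would then look like:

curl -X POST http://localhost:8000/chat -H "Content-Type: application/json" -d '{"user_input": "Hello"}'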