gba16326553 committed on
Commit
718b303
·
verified ·
1 Parent(s): 69105c3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -7,9 +7,9 @@ title = "🤖AI ChatBot"
7
  description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
8
  examples = [["How are you?"]]
9
 
10
- tokenizer = AutoTokenizer.from_pretrained("stvlynn/Gemma-2-2b-Chinese-it")
11
- model = AutoModelForCausalLM.from_pretrained("stvlynn/Gemma-2-2b-Chinese-it", torch_dtype=torch.float16)
12
-
13
  #tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
14
  #model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
15
  #The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.
 
7
  description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
8
  examples = [["How are you?"]]
9
 
10
+ tokenizer = AutoTokenizer.from_pretrained("models/google/gemma-2-2b-it")
11
+ model = AutoModelForCausalLM.from_pretrained("models/google/gemma-2-2b-it", torch_dtype=torch.float16)
12
+ #stvlynn/Gemma-2-2b-Chinese-it
13
  #tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
14
  #model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
15
  #The model was loaded with use_flash_attention_2=True, which is deprecated and may be removed in a future release. Please use `attn_implementation="flash_attention_2"` instead.