valencar committed on
Commit
9804ed3
·
verified ·
1 Parent(s): a2a6a3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import streamlit as st
2
  # Load model directly
3
- from transformers import AutoModel
4
  from huggingface_hub import login
5
  import os
6
 
@@ -9,11 +9,14 @@ file = 'llama-2-7b.Q5_0.gguf'
9
 
10
  NO_GPU = 0
11
  GPU_LAYERS = 50
12
- model = AutoModel.from_pretrained("valencar/llamm",
13
- model_file=file, model_type="llama", gpu_layers=NO_GPU)
14
 
15
- access_token = os.getenv('HF_TOKEN2')
16
- login(token = access_token)
 
 
 
 
 
17
 
18
  prompt = "AI is going to"
19
 
 
# app.py — updated version from this commit, reconstructed from the scraped
# diff view: the per-line number markers the page scrape interleaved between
# source lines have been removed so the chunk reads as real Python.
import streamlit as st
# Load model directly
from transformers import AutoModel, AutoModelForCausalLM
from huggingface_hub import login
import os

NO_GPU = 0       # request zero GPU-offloaded layers (CPU-only inference)
GPU_LAYERS = 50  # unused in this excerpt; presumably for a GPU-enabled run — confirm

# NOTE(review): `model_type` and `gpu_layers` are ctransformers-style keyword
# arguments; transformers' AutoModelForCausalLM.from_pretrained does not accept
# them (transformers loads GGUF weights via `gguf_file=` instead), and `file`
# (a bare .gguf filename, defined earlier in the file outside this excerpt)
# is not a Hub repo id. Confirm whether the intended import is
# `from ctransformers import AutoModelForCausalLM` before shipping.
llm = AutoModelForCausalLM.from_pretrained(file, model_type="llama", gpu_layers=NO_GPU)

# Previous variant: load from the Hub repo instead of a local file.
# model = AutoModelForCausalLM.from_pretrained("valencar/llamm",
# model_file=file, model_type="llama", gpu_layers=NO_GPU)

# Hub authentication, currently disabled.
# access_token = os.getenv('HF_TOKEN2')
# login(token = access_token)

prompt = "AI is going to"