valencar committed on
Commit
b990905
·
1 Parent(s): cf75435
Files changed (1) hide show
  1. app.py +32 -11
app.py CHANGED
@@ -12,27 +12,48 @@ question = "Qual é o maior planeta do sistema solar?"
12
 
13
 
14
  before = datetime.datetime.now()
 
 
 
 
 
 
15
 
16
- # Use a pipeline as a high-level helper
17
- from transformers import pipeline
18
 
19
- messages = [
20
- {"role": "user", "content": question},
21
- ]
22
 
23
- print('gerando a saida...')
24
 
25
  st.write('gerando a saida...')
 
 
26
 
27
- pipe = pipeline("text-generation", model="01-ai/Yi-1.5-34B-Chat")
28
 
29
- st.write('pipeline...')
30
 
31
- output = pipe(messages)
32
 
33
- st.write('saída gerada...')
 
34
 
35
- st.write(output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
 
38
 
 
12
 
13
 
14
  before = datetime.datetime.now()
15
+
16
+ # Load model directly
17
+ from transformers import AutoTokenizer, AutoModelForCausalLM
18
+
19
+ tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-1.5-34B-Chat")
20
+ model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-1.5-34B-Chat")
21
 
 
 
22
 
23
+ prompt = "Hey, are you conscious? Can you talk to me?"
24
+ inputs = tokenizer(prompt, return_tensors="pt")
 
25
 
26
+ # Generate
27
 
28
  st.write('gerando a saida...')
29
+ generate_ids = model.generate(inputs.input_ids, max_length=30)
30
+ output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
31
 
 
32
 
33
+ st.write('saída gerada')
34
 
35
+ st.write(output)
36
 
37
+ # Use a pipeline as a high-level helper
38
+ # from transformers import pipeline
39
 
40
+ # messages = [
41
+ # {"role": "user", "content": question},
42
+ # ]
43
+
44
+ # print('gerando a saida...')
45
+
46
+ # st.write('gerando a saida...')
47
+
48
+ # pipe = pipeline("text-generation", model="01-ai/Yi-1.5-34B-Chat")
49
+
50
+ # st.write('pipeline...')
51
+
52
+ # output = pipe(messages)
53
+
54
+ # st.write('saída gerada...')
55
+
56
+ # st.write(output)
57
 
58
 
59