AItool committed on
Commit
5aa525a
·
verified ·
1 Parent(s): 420697a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -42
app.py CHANGED
@@ -1,42 +1,45 @@
import os
import streamlit as st
from huggingface_hub import InferenceClient

# Load the API token from an environment variable.
api_key = os.getenv("HF_TOKEN")
if not api_key:
    # Fail loudly up front instead of letting the API call error later.
    st.warning("HF_TOKEN environment variable is not set; inference calls will fail.")

# Instantiate the InferenceClient (talks to the HF serverless Inference API).
client = InferenceClient(api_key=api_key)

# Streamlit app title
st.title("Hugging Face Inference with Streamlit")

# Create a text input area for user prompts.
with st.form("my_form"):
    text = st.text_area("Enter text:", "Tell me a joke to make me laugh.")
    submitted = st.form_submit_button("Submit")

# Accumulates the streamed completion text across chunks.
full_text = ""

if submitted:
    messages = [
        {"role": "user", "content": text}
    ]

    # Create a new stream for each submission.
    stream = client.chat.completions.create(
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        messages=messages,
        temperature=0.5,
        max_tokens=100,
        top_p=0.7,
        stream=True
    )

    # Concatenate chunks to form the full response.
    for chunk in stream:
        # The terminal chunk of an OpenAI-style stream carries content=None;
        # concatenating it directly would raise TypeError.
        delta = chunk.choices[0].delta.content
        if delta:
            full_text += delta

# Display the full response (empty info box before first submission,
# matching the original behavior).
st.info(full_text)
 
 
 
 
"""
Streamlit front-end for the Hugging Face serverless Inference API.

Submits a user prompt to TinyLlama and streams the chat completion
back into an info box.

@author: idoia lerchundi
"""
import os
import streamlit as st
from huggingface_hub import InferenceClient

# Load the API token from an environment variable.
api_key = os.getenv("HF_TOKEN")
if not api_key:
    # Fail loudly up front instead of letting the API call error later.
    st.warning("HF_TOKEN environment variable is not set; inference calls will fail.")

# Instantiate the InferenceClient (talks to the HF serverless Inference API).
client = InferenceClient(api_key=api_key)

# Streamlit app title
st.title("Hugging Face Inference with Streamlit")

# Create a text input area for user prompts.
with st.form("my_form"):
    text = st.text_area("Enter text:", "Tell me a joke to make me laugh.")
    submitted = st.form_submit_button("Submit")

# Accumulates the streamed completion text across chunks.
full_text = ""

if submitted:
    messages = [
        {"role": "user", "content": text}
    ]

    # Create a new stream for each submission.
    stream = client.chat.completions.create(
        model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        messages=messages,
        temperature=0.5,
        max_tokens=100,
        top_p=0.7,
        stream=True
    )

    # Concatenate chunks to form the full response.
    for chunk in stream:
        # The terminal chunk of an OpenAI-style stream carries content=None;
        # concatenating it directly would raise TypeError.
        delta = chunk.choices[0].delta.content
        if delta:
            full_text += delta

# Display the full response (empty info box before first submission,
# matching the original behavior).
st.info(full_text)