saikub committed
Commit 0b6fa72 · verified · 1 Parent(s): 0be76a1

Update app.py

Files changed (1)
  1. app.py +192 -185
app.py CHANGED
@@ -1,203 +1,203 @@
 
-import numpy as np
-import streamlit as st
-from openai import OpenAI
-import os
-import sys
-from dotenv import load_dotenv, dotenv_values
-load_dotenv()
-
-
-
-
-
-# initialize the client
-client = OpenAI(
-    base_url="https://api-inference.huggingface.co/v1",
-    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # "hf_xxx"  # Replace with your token
-)
-
-
-
-
-# Create supported models
-model_links = {
-    "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
-    "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
-    "Gemma-7B": "google/gemma-1.1-7b-it",
-    "Gemma-2B": "google/gemma-1.1-2b-it",
-    "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
-
-}
-
-# Pull info about the model to display
-model_info = {
-    "Mistral-7B":
-        {'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
-         'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
-    "Gemma-7B":
-        {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
-         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Gemma-2B":
-        {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
-         'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
-    "Zephyr-7B":
-        {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nFrom Hugging Face: \n\
-Zephyr is a series of language models that are trained to act as helpful assistants. \
-[Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
-is the third model in the series, and is a fine-tuned version of google/gemma-7b \
-that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
-    "Zephyr-7B-β":
-        {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nFrom Hugging Face: \n\
-Zephyr is a series of language models that are trained to act as helpful assistants. \
-[Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
-is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
-that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
-         'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
-    "Meta-Llama-3-8B":
-        {'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
-\nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
-         'logo': 'Llama_logo.png'},
-}
-
-
-# Random dog images for error message
-random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
-              "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
-              "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
-              "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
-              "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
-              "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
-              "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
-              "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
-              "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
-              "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
-              "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
-              "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
-              "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
-
-
-
-def reset_conversation():
-    '''
-    Resets Conversation
-    '''
-    st.session_state.conversation = []
-    st.session_state.messages = []
-    return None
-
-
-
-
-# Define the available models
-models = [key for key in model_links.keys()]
-
-# Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("Select Model", models)
-
-# Create a temperature slider
-temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
-
-
-# Add reset button to clear conversation
-st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
-
-
-# Create model description
-st.sidebar.write(f"You're now chatting with **{selected_model}**")
-st.sidebar.markdown(model_info[selected_model]['description'])
-st.sidebar.image(model_info[selected_model]['logo'])
-st.sidebar.markdown("*Generated content may be inaccurate or false.*")
-
-
-
-
-
-if "prev_option" not in st.session_state:
-    st.session_state.prev_option = selected_model
-
-if st.session_state.prev_option != selected_model:
-    st.session_state.messages = []
-    # st.write(f"Changed to {selected_model}")
-    st.session_state.prev_option = selected_model
-    reset_conversation()
-
-
-
-# Pull in the model we want to use
-repo_id = model_links[selected_model]
-
-
-st.subheader(f'AI - {selected_model}')
-# st.title(f'ChatBot Using {selected_model}')
-
-# Set a default model
-if selected_model not in st.session_state:
-    st.session_state[selected_model] = model_links[selected_model]
-
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-
-# Display chat messages from history on app rerun
-for message in st.session_state.messages:
-    with st.chat_message(message["role"]):
-        st.markdown(message["content"])
-
-
-
-# Accept user input
-if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
-
-    # Display user message in chat message container
-    with st.chat_message("user"):
-        st.markdown(prompt)
-    # Add user message to chat history
-    st.session_state.messages.append({"role": "user", "content": prompt})
-
-
-    # Display assistant response in chat message container
-    with st.chat_message("assistant"):
-
-        try:
-            stream = client.chat.completions.create(
-                model=model_links[selected_model],
-                messages=[
-                    {"role": m["role"], "content": m["content"]}
-                    for m in st.session_state.messages
-                ],
-                temperature=temp_values,  # 0.5,
-                stream=True,
-                max_tokens=3000,
-            )
-
-            response = st.write_stream(stream)
-
-        except Exception as e:
-            # st.empty()
-            response = "😵‍💫 Looks like someone unplugged something!\
-\n Either the model space is being updated or something is down.\
-\n\
-\n Try again later. \
-\n\
-\n Here's a random pic of a 🐶:"
-            st.write(response)
-            random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
-            st.image(random_dog_pick)
-            st.write("This was the error message:")
-            st.write(e)
-
-
-
-
-    st.session_state.messages.append({"role": "assistant", "content": response})
-
+# import numpy as np
+# import streamlit as st
+# from openai import OpenAI
+# import os
+# import sys
+# from dotenv import load_dotenv, dotenv_values
+# load_dotenv()
+
+
+
+
+
+# # initialize the client
+# client = OpenAI(
+#     base_url="https://api-inference.huggingface.co/v1",
+#     api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN')  # "hf_xxx"  # Replace with your token
+# )
+
+
+
+
+# # Create supported models
+# model_links = {
+#     "Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B-Instruct",
+#     "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
+#     "Gemma-7B": "google/gemma-1.1-7b-it",
+#     "Gemma-2B": "google/gemma-1.1-2b-it",
+#     "Zephyr-7B-β": "HuggingFaceH4/zephyr-7b-beta",
+
+# }
+
+# # Pull info about the model to display
+# model_info = {
+#     "Mistral-7B":
+#         {'description': """The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team and has over **7 billion parameters.** \n""",
+#          'logo': 'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
+#     "Gemma-7B":
+#         {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **7 billion parameters.** \n""",
+#          'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+#     "Gemma-2B":
+#         {'description': """The Gemma model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nIt was created by [**Google's AI Team**](https://blog.google/technology/developers/gemma-open-models/) and has over **2 billion parameters.** \n""",
+#          'logo': 'https://pbs.twimg.com/media/GG3sJg7X0AEaNIq.jpg'},
+#     "Zephyr-7B":
+#         {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nFrom Hugging Face: \n\
+# Zephyr is a series of language models that are trained to act as helpful assistants. \
+# [Zephyr 7B Gemma](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1) \
+# is the third model in the series, and is a fine-tuned version of google/gemma-7b \
+# that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
+#          'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png'},
+#     "Zephyr-7B-β":
+#         {'description': """The Zephyr model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nFrom Hugging Face: \n\
+# Zephyr is a series of language models that are trained to act as helpful assistants. \
+# [Zephyr-7B-β](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) \
+# is the second model in the series, and is a fine-tuned version of mistralai/Mistral-7B-v0.1 \
+# that was trained on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO)\n""",
+#          'logo': 'https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha/resolve/main/thumbnail.png'},
+#     "Meta-Llama-3-8B":
+#         {'description': """The Llama (3) model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
+# \nIt was created by the [**Meta AI**](https://llama.meta.com/) team and has over **8 billion parameters.** \n""",
+#          'logo': 'Llama_logo.png'},
+# }
+
+
+# # Random dog images for error message
+# random_dog = ["0f476473-2d8b-415e-b944-483768418a95.jpg",
+#               "1bd75c81-f1d7-4e55-9310-a27595fa8762.jpg",
+#               "526590d2-8817-4ff0-8c62-fdcba5306d02.jpg",
+#               "1326984c-39b0-492c-a773-f120d747a7e2.jpg",
+#               "42a98d03-5ed7-4b3b-af89-7c4876cb14c3.jpg",
+#               "8b3317ed-2083-42ac-a575-7ae45f9fdc0d.jpg",
+#               "ee17f54a-83ac-44a3-8a35-e89ff7153fb4.jpg",
+#               "027eef85-ccc1-4a66-8967-5d74f34c8bb4.jpg",
+#               "08f5398d-7f89-47da-a5cd-1ed74967dc1f.jpg",
+#               "0fd781ff-ec46-4bdc-a4e8-24f18bf07def.jpg",
+#               "0fb4aeee-f949-4c7b-a6d8-05bf0736bdd1.jpg",
+#               "6edac66e-c0de-4e69-a9d6-b2e6f6f9001b.jpg",
+#               "bfb9e165-c643-4993-9b3a-7e73571672a6.jpg"]
+
+
+
+# def reset_conversation():
+#     '''
+#     Resets Conversation
+#     '''
+#     st.session_state.conversation = []
+#     st.session_state.messages = []
+#     return None
+
+
+
+
+# # Define the available models
+# models = [key for key in model_links.keys()]
+
+# # Create the sidebar with the dropdown for model selection
+# selected_model = st.sidebar.selectbox("Select Model", models)
+
+# # Create a temperature slider
+# temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+
+
+# # Add reset button to clear conversation
+# st.sidebar.button('Reset Chat', on_click=reset_conversation)  # Reset button
+
+
+# # Create model description
+# st.sidebar.write(f"You're now chatting with **{selected_model}**")
+# st.sidebar.markdown(model_info[selected_model]['description'])
+# st.sidebar.image(model_info[selected_model]['logo'])
+# st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+
+
+
+
+
+# if "prev_option" not in st.session_state:
+#     st.session_state.prev_option = selected_model
+
+# if st.session_state.prev_option != selected_model:
+#     st.session_state.messages = []
+#     # st.write(f"Changed to {selected_model}")
+#     st.session_state.prev_option = selected_model
+#     reset_conversation()
+
+
+
+# # Pull in the model we want to use
+# repo_id = model_links[selected_model]
+
+
+# st.subheader(f'AI - {selected_model}')
+# # st.title(f'ChatBot Using {selected_model}')
+
+# # Set a default model
+# if selected_model not in st.session_state:
+#     st.session_state[selected_model] = model_links[selected_model]
+
+# # Initialize chat history
+# if "messages" not in st.session_state:
+#     st.session_state.messages = []
+
+
+# # Display chat messages from history on app rerun
+# for message in st.session_state.messages:
+#     with st.chat_message(message["role"]):
+#         st.markdown(message["content"])
+
+
+
+# # Accept user input
+# if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
+
+#     # Display user message in chat message container
+#     with st.chat_message("user"):
+#         st.markdown(prompt)
+#     # Add user message to chat history
+#     st.session_state.messages.append({"role": "user", "content": prompt})
+
+
+#     # Display assistant response in chat message container
+#     with st.chat_message("assistant"):
+
+#         try:
+#             stream = client.chat.completions.create(
+#                 model=model_links[selected_model],
+#                 messages=[
+#                     {"role": m["role"], "content": m["content"]}
+#                     for m in st.session_state.messages
+#                 ],
+#                 temperature=temp_values,  # 0.5,
+#                 stream=True,
+#                 max_tokens=3000,
+#             )
+
+#             response = st.write_stream(stream)
+
+#         except Exception as e:
+#             # st.empty()
+#             response = "😵‍💫 Looks like someone unplugged something!\
+# \n Either the model space is being updated or something is down.\
+# \n\
+# \n Try again later. \
+# \n\
+# \n Here's a random pic of a 🐶:"
+#             st.write(response)
+#             random_dog_pick = 'https://random.dog/' + random_dog[np.random.randint(len(random_dog))]
+#             st.image(random_dog_pick)
+#             st.write("This was the error message:")
+#             st.write(e)
+
+
+
+
+#     st.session_state.messages.append({"role": "assistant", "content": response})
+
 # import gradio as gr
 # from huggingface_hub import InferenceClient
@@ -267,29 +267,36 @@ if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
 
 # gr.load("models/meta-llama/Meta-Llama-3.1-70B-Instruct").launch()
 ########################################
-# import streamlit as st
-# from transformers import AutoTokenizer, AutoModelForCausalLM
-
-# # Load model directly
-# tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
-# model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct")
-
-# # Initialize chat history
-# if "chat_history" not in st.session_state:
-#     st.session_state.chat_history = []
-
-# # Display chat history
-# for chat in st.session_state.chat_history:
-#     st.write(f"User: {chat['user']}")
-#     st.write(f"Response: {chat['response']}")
-
-# # Get user input
-# user_input = st.text_input("Enter your message:")
-
-# # Generate response
-# if st.button("Send"):
-#     inputs = tokenizer(user_input, return_tensors="pt")
-#     outputs = model.generate(**inputs)
-#     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-#     st.session_state.chat_history.append({"user": user_input, "response": response})
-#     st.write(f"Response: {response}")
+from openai import OpenAI
+import streamlit as st
+
+st.title("ChatGPT-like clone")
+
+client = OpenAI(api_key=st.secrets["OPENAI_API_KEY"])
+
+if "openai_model" not in st.session_state:
+    st.session_state["openai_model"] = "gpt-3.5-turbo"
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if prompt := st.chat_input("What is up?"):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    with st.chat_message("assistant"):
+        stream = client.chat.completions.create(
+            model=st.session_state["openai_model"],
+            messages=[
+                {"role": m["role"], "content": m["content"]}
+                for m in st.session_state.messages
+            ],
+            stream=True,
+        )
+        response = st.write_stream(stream)
+    st.session_state.messages.append({"role": "assistant", "content": response})
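
A note on running the new version: st.secrets["OPENAI_API_KEY"] is read from Streamlit's secrets store, so the key has to be provided as a Space secret or, for a local run, in a project-level secrets file. A minimal sketch, assuming a placeholder key value:

    # .streamlit/secrets.toml (the key value below is hypothetical)
    OPENAI_API_KEY = "sk-..."

With the secret in place the app starts as usual via streamlit run app.py. Unlike the commented-out Hugging Face version above, this code path calls the OpenAI API directly, so HUGGINGFACEHUB_API_TOKEN and python-dotenv are no longer needed.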