Update app.py
app.py CHANGED
@@ -80,6 +80,10 @@ from openai import OpenAI
 import os
 import time
 
+from PIL import Image
+import requests
+from io import BytesIO
+
 # Prompts for each subject
 prompts = {
     "History": "You are a professional history tutor. You explain historical events clearly and vividly. In addition to answering questions, you analyze potential difficulties the user may face and generate a thoughtful follow-up question.",
@@ -130,6 +134,18 @@ def predict(user_input, history, subject, model, max_tokens, temperature, top_p)
     full_message += f" (First Chunk: {first_chunk_time:.2f}s, Total: {total_time:.2f}s)"
     yield full_message
 
+# Function to generate image based on prompt
+def generate_image(prompt, size="256x256"):
+    response = openai.Image.create(
+        prompt=prompt,
+        n=1,
+        size=size
+    )
+    image_url = response['data'][0]['url']
+    image_response = requests.get(image_url)
+    image = Image.open(BytesIO(image_response.content))
+    return image
+
 # Gradio interface
 with gr.Blocks(css="footer{display:none !important}") as demo:
     gr.Markdown("# ๐ Educational Learning Assistant")
@@ -145,21 +161,18 @@ with gr.Blocks(css="footer{display:none !important}") as demo:
     temperature = gr.Slider(0, 1, value=0.7, label="Temperature")
     top_p = gr.Slider(0, 1, value=0.95, label="Top P")
     state = gr.State([])
-
-    def wrapped_predict(message, history, model, max_tokens, temperature, top_p):
-        full_response = ""
-        for chunk in predict(message, history, subject, model, max_tokens, temperature, top_p):
-            full_response = chunk
-        history.append([message, full_response])
-        return history, ""
-    '''
+
     # ๐ Use subject=subject to freeze its value
     def wrapped_predict(message, history, model, max_tokens, temperature, top_p, subject=subject):
         full_response = ""
         for chunk in predict(message, history, subject, model, max_tokens, temperature, top_p):
             full_response = chunk
         history.append([message, full_response])
-        return history, ""
+
+        # Generate image based on the latest assistant response
+        image = generate_image(full_response)
+
+        return history, "", image
 
     user_input.submit(
         wrapped_predict,
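
Note on the new generate_image helper: it calls the legacy openai.Image.create API, while the file already imports the v1-style client (from openai import OpenAI, visible in the first hunk's context line). The legacy call only exists on openai<1.0; on openai>=1.0 it raises a deprecation error. A minimal sketch of the same helper against the v1 Images API, assuming a module-level client and an OPENAI_API_KEY in the environment:

from io import BytesIO
import requests
from openai import OpenAI
from PIL import Image

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def generate_image(prompt, size="256x256"):
    # v1 returns typed objects rather than dicts, so attribute access replaces indexing
    response = client.images.generate(prompt=prompt, n=1, size=size)
    image_url = response.data[0].url
    image_response = requests.get(image_url)
    return Image.open(BytesIO(image_response.content))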
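The subject=subject default argument works around Python's late-binding closures: the comment suggests the UI presumably defines one wrapped_predict per subject inside a loop, and a plain closure would look up subject at call time, so every handler would see the loop's final value. Binding it as a default freezes the current value at definition time. A standalone illustration of the pitfall:

handlers = []
for subject in ["History", "Math"]:
    # Late binding: the lambda looks up `subject` when called, not when defined
    handlers.append(lambda: subject)
print([h() for h in handlers])  # ['Math', 'Math']

handlers = []
for subject in ["History", "Math"]:
    # The default argument is evaluated now, freezing the current value
    handlers.append(lambda subject=subject: subject)
print([h() for h in handlers])  # ['History', 'Math']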
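Since wrapped_predict now returns three values (the updated history, an empty string to clear the textbox, and a PIL image), the user_input.submit call needs a matching third output component. The full argument list falls outside this diff, so the sketch below is an assumption; chatbot, model, max_tokens, and image_output are illustrative names, not taken from the commit:

image_output = gr.Image(label="Generated illustration", type="pil")

user_input.submit(
    wrapped_predict,
    inputs=[user_input, state, model, max_tokens, temperature, top_p],
    outputs=[chatbot, user_input, image_output],  # one component per returned value
)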