Update app.py
app.py CHANGED
@@ -16,9 +16,9 @@ logger.info(f"Using device: {device}")
 
 # Response cache
 response_cache = {
-    "hi": "Hello! I'm your financial advisor. How can I help with investing?",
-    "hello": "Hello! I'm your financial advisor. How can I help with investing?",
-    "hey": "Hi there! Ready to discuss investment goals?",
+    "hi": "Hello! I'm FinChat, your financial advisor. How can I help with investing?",
+    "hello": "Hello! I'm FinChat, your financial advisor. How can I help with investing?",
+    "hey": "Hi there! Ready to discuss investment goals with FinChat?",
     "what is better individual stocks or etfs?": (
         "Here’s a comparison of individual stocks vs. ETFs:\n"
         "1. **Individual Stocks**: Offer high potential returns by investing in specific companies (e.g., Apple) but carry higher risk due to lack of diversification. Require active research.\n"
@@ -26,6 +26,24 @@ response_cache = {
         "3. **Recommendation**: Beginners should start with ETFs for stability; experienced investors may add stocks for growth.\n"
         "Consult a financial planner for personalized advice."
     ),
+    "is $100 per month enough to invest?": (
+        "Yes, $100 per month is enough to start investing. Here’s why and how:\n"
+        "1. **Feasibility**: Many brokerages (e.g., Fidelity, Vanguard) have no minimums, and commission-free trading eliminates fee concerns.\n"
+        "2. **Options**: You can buy fractional shares of ETFs (e.g., VOO, ~$500/share) or low-cost stocks, making $100 viable.\n"
+        "3. **Strategy**: Use dollar-cost averaging to invest $100 monthly, reducing market timing risks.\n"
+        "4. **Growth**: Over time, $100 monthly can grow significantly with compound interest (e.g., 7% annual return could yield ~$40,000 in 20 years).\n"
+        "5. **Considerations**: Ensure you have an emergency fund first; diversify to manage risk.\n"
+        "Consult a financial planner for tailored advice."
+    ),
+    "can i invest $100 a month?": (
+        "Yes, $100 a month is sufficient to start investing. Here’s how:\n"
+        "1. **Brokerage**: Open an account with Fidelity or Vanguard, which offer no minimums.\n"
+        "2. **Investments**: Buy fractional shares of ETFs like VOO ($100 buys ~0.2 shares) or low-cost index funds.\n"
+        "3. **Approach**: Use dollar-cost averaging to invest $100 monthly for steady growth.\n"
+        "4. **Long-Term**: At a 7% annual return, $100 monthly could grow to ~$40,000 in 20 years.\n"
+        "5. **Tips**: Prioritize an emergency fund and diversify.\n"
+        "Consult a financial planner."
+    ),
     "hi, give me step-by-step investing advice": (
         "Here’s a step-by-step guide to start investing:\n"
         "1. Open a brokerage account (e.g., Fidelity, Vanguard) if 18 or older.\n"
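A quick check of the arithmetic behind the new cached answers' "~$40,000 in 20 years" claim, as a minimal sketch that is not part of app.py; it assumes $100 contributed at the end of each month and a 7% nominal annual return compounded monthly:

# Future value of a $100/month ordinary annuity at a 7% nominal annual rate.
monthly_contribution = 100.0
monthly_rate = 0.07 / 12
months = 20 * 12
future_value = monthly_contribution * (((1 + monthly_rate) ** months - 1) / monthly_rate)
print(f"${future_value:,.0f}")  # roughly $52,000 under these assumptions

The exact figure depends on compounding frequency, contribution timing, and whether the 7% is taken before or after inflation, which is why the cached replies keep "~$40,000" as a conservative round number.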
@@ -142,9 +160,13 @@ except Exception as e:
 
 # Pre-tokenize prompt prefix
 prompt_prefix = (
-    "You are a financial advisor. Provide numbered list advice for investing prompts. "
-    "Avoid repetition.\n\n"
-    "Example: Q:
+    "You are a financial advisor. Provide detailed, numbered list advice with clear reasoning for investing prompts. "
+    "Avoid repetition and incomplete answers. Explain why each step or choice is beneficial.\n\n"
+    "Example: Q: Can I invest $100 a month?\n"
+    "A: Yes, $100 a month is sufficient to start. Here’s how:\n"
+    "1. Open a brokerage account (e.g., Fidelity): No minimums allow small investments, making it accessible.\n"
+    "2. Buy fractional ETF shares (e.g., VOO): Diversifies risk across many companies.\n"
+    "3. Use dollar-cost averaging: Investing regularly reduces market timing risks.\n\n"
     "Q: "
 )
 prefix_tokens = tokenizer(prompt_prefix, return_tensors="pt", truncation=True, max_length=512).to(device)
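For context, a minimal sketch of how a prefix like this is typically combined with the user's question before generation. The actual assembly in app.py sits outside this hunk, so the "\nA:" suffix and the abbreviated prefix below are assumptions; only the names full_prompt and inputs are taken from later in the diff.

from transformers import AutoTokenizer

# Checkpoint assumed from the interface description further down ("distilgpt2").
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
prompt_prefix = "You are a financial advisor. ...\n\nQ: "  # abbreviated; app.py defines the full text
user_input = "Can I invest $100 a month?"

# Hypothetical assembly; app.py later slices full_prompt off the decoded output.
full_prompt = prompt_prefix + user_input.strip() + "\nA:"
inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512)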
@@ -174,7 +196,7 @@ def chat_with_model(user_input, history=None):
     # Skip model for short prompts
     if len(user_input.strip()) <= 5:
         logger.info("Short prompt, returning default response")
-        response = "Hello! I'm your financial advisor. Ask about investing!"
+        response = "Hello! I'm FinChat, your financial advisor. Ask about investing!"
         logger.info(f"Chatbot response: {response}")
         history = history or []
         history.append({"role": "user", "content": user_input})
@@ -196,12 +218,13 @@ def chat_with_model(user_input, history=None):
 
     # Generate response
     with torch.inference_mode():
+        logger.info("Generating response with model")
         outputs = model.generate(
             **inputs,
-            max_new_tokens=
-            min_length=
+            max_new_tokens=80,
+            min_length=20,
             do_sample=True,
-            temperature=0.
+            temperature=0.6,
             top_p=0.9,
             repetition_penalty=1.2,
             pad_token_id=tokenizer.eos_token_id
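For reference, the updated sampling settings can be exercised standalone. This is a minimal sketch, with distilgpt2 assumed as the checkpoint because the interface below describes FinChat as distilgpt2-based; app.py loads its own model and tokenizer.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

inputs = tokenizer("Q: Can I invest $100 a month?\nA:", return_tensors="pt")
with torch.inference_mode():
    outputs = model.generate(
        **inputs,
        max_new_tokens=80,       # cap on generated tokens, as in the updated app.py
        min_length=20,           # discourage one-line answers
        do_sample=True,
        temperature=0.6,         # lower temperature -> more focused sampling
        top_p=0.9,
        repetition_penalty=1.2,
        pad_token_id=tokenizer.eos_token_id,
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))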
@@ -210,14 +233,9 @@ def chat_with_model(user_input, history=None):
     response = response[len(full_prompt):].strip() if response.startswith(full_prompt) else response
     logger.info(f"Chatbot response: {response}")
 
-    # Update cache
+    # Update cache
     response_cache[cache_key] = response
-    try:
-        with open(cache_file, 'w') as f:
-            json.dump(response_cache, f, indent=2)
-        logger.info("Updated cache.json")
-    except Exception as e:
-        logger.warning(f"Failed to update cache.json: {e}")
+    logger.info("Cache miss, added to in-memory cache")
 
     # Update history
     history = history or []
@@ -234,10 +252,26 @@ def chat_with_model(user_input, history=None):
     history.append({"role": "assistant", "content": response})
     return response, history
 
+# Save cache on exit
+def save_cache():
+    try:
+        with open(cache_file, 'w') as f:
+            json.dump(response_cache, f, indent=2)
+        logger.info("Saved cache to cache.json")
+    except Exception as e:
+        logger.warning(f"Failed to save cache.json: {e}")
+
 # Create Gradio interface
 logger.info("Initializing Gradio interface")
 try:
-    with gr.Blocks() as interface:
+    with gr.Blocks(title="FinChat: An LLM based on distilgpt2 model") as interface:
+        gr.Markdown(
+            """
+            # FinChat: An LLM based on distilgpt2 model
+            FinChat provides financial advice using the lightweight distilgpt2 model, optimized for fast, detailed responses.
+            Ask about investing strategies, ETFs, stocks, or budgeting to get started!
+            """
+        )
         chatbot = gr.Chatbot(type="messages")
         msg = gr.Textbox(label="Your message")
         submit = gr.Button("Send")
@@ -253,6 +287,7 @@ try:
             outputs=[msg, chatbot]
         )
         clear.click(lambda: None, None, chatbot)
+        logger.info("Gradio interface initialized successfully")
 except Exception as e:
     logger.error(f"Error initializing Gradio interface: {e}")
     raise
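Putting this hunk and the previous one together, a minimal sketch of the Blocks wiring. The respond() helper, its inputs= list, and the "Clear" button label are assumptions, since only outputs=[msg, chatbot] and the clear handler appear in the diff; in app.py the reply would come from chat_with_model(user_input, history).

import gradio as gr

def respond(user_input, history):
    # Placeholder reply; app.py would call chat_with_model(user_input, history) here.
    history = history or []
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": f"(placeholder reply to: {user_input})"})
    return "", history  # clear the textbox, update the chatbot

with gr.Blocks(title="FinChat: An LLM based on distilgpt2 model") as interface:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Your message")
    submit = gr.Button("Send")
    clear = gr.Button("Clear")
    submit.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    clear.click(lambda: None, None, chatbot)

interface.launch()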
@@ -265,5 +300,9 @@ if __name__ == "__main__" and not os.getenv("HF_SPACE"):
     except Exception as e:
         logger.error(f"Error launching interface: {e}")
         raise
+    finally:
+        save_cache()
 else:
-    logger.info("Running in Hugging Face Spaces, interface defined but not launched")
+    logger.info("Running in Hugging Face Spaces, interface defined but not launched")
+    import atexit
+    atexit.register(save_cache)