Update app.py
app.py
CHANGED
@@ -14,12 +14,18 @@ logger = logging.getLogger(__name__)
 device = torch.device("cpu")
 logger.info(f"Using device: {device}")
 
-#
-cache_file = "cache.json"
+# Response cache
 response_cache = {
     "hi": "Hello! I'm your financial advisor. How can I help with investing?",
     "hello": "Hello! I'm your financial advisor. How can I help with investing?",
     "hey": "Hi there! Ready to discuss investment goals?",
+    "what is better individual stocks or etfs?": (
+        "Here’s a comparison of individual stocks vs. ETFs:\n"
+        "1. **Individual Stocks**: Offer high potential returns by investing in specific companies (e.g., Apple) but carry higher risk due to lack of diversification. Require active research.\n"
+        "2. **ETFs**: Provide diversification by tracking indices (e.g., VOO for S&P 500), reducing risk but with lower potential returns. Lower fees and less research needed.\n"
+        "3. **Recommendation**: Beginners should start with ETFs for stability; experienced investors may add stocks for growth.\n"
+        "Consult a financial planner for personalized advice."
+    ),
     "hi, give me step-by-step investing advice": (
         "Here’s a step-by-step guide to start investing:\n"
         "1. Open a brokerage account (e.g., Fidelity, Vanguard) if 18 or older.\n"
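For context, these canned replies are served before the model is ever called. A minimal standalone sketch of the exact-match path (the normalization mirrors the `cache_key = user_input.lower().strip()` line further down in this diff):

```python
response_cache = {"hi": "Hello! I'm your financial advisor. How can I help with investing?"}

def cached_reply(user_input: str) -> str | None:
    # Normalize the same way chat_with_model does, then try an exact hit;
    # fuzzy matching (get_closest_cache_key, below) is the fallback.
    return response_cache.get(user_input.lower().strip())

print(cached_reply("  Hi  "))  # exact hit after normalization
print(cached_reply("hiya"))    # None -> falls through to fuzzy match / model
```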
@@ -109,6 +115,7 @@ response_cache = {
 }
 
 # Load persistent cache
+cache_file = "cache.json"
 try:
     if os.path.exists(cache_file):
         with open(cache_file, 'r') as f:
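This hunk moves `cache_file = "cache.json"` next to the block that actually reads it. A standalone sketch of that load-and-merge pattern, assuming the file holds a flat JSON object of question/answer strings:

```python
import json
import os

cache_file = "cache.json"
response_cache = {"hi": "Hello! I'm your financial advisor. How can I help with investing?"}

# Merge previously persisted replies into the in-memory seed cache;
# a missing or unreadable file must not prevent startup.
try:
    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            response_cache.update(json.load(f))
except (OSError, json.JSONDecodeError) as e:
    print(f"Skipping persisted cache: {e}")
```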
@@ -143,14 +150,14 @@ prompt_prefix = (
 prefix_tokens = tokenizer(prompt_prefix, return_tensors="pt", truncation=True, max_length=512).to(device)
 
 # Fuzzy matching for cache
-def get_closest_cache_key(message, cache_keys, threshold=0.
+def get_closest_cache_key(message, cache_keys, threshold=0.7):
     matches = difflib.get_close_matches(message, cache_keys, n=1, cutoff=threshold)
     return matches[0] if matches else None
 
 # Define chat function
 def chat_with_model(user_input, history=None):
     try:
-        logger.info(f"Processing
+        logger.info(f"Processing user input: {user_input}")
         # Normalize and check cache
         cache_key = user_input.lower().strip()
         cache_keys = list(response_cache.keys())
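To see what the new `threshold=0.7` default means in practice, here is the helper run standalone (abbreviated key list; the cutoff is `difflib.SequenceMatcher`'s similarity ratio):

```python
import difflib

cache_keys = ["hi", "hello", "hey", "what is better individual stocks or etfs?"]

def get_closest_cache_key(message, cache_keys, threshold=0.7):
    # Best fuzzy match above the similarity cutoff, else None.
    matches = difflib.get_close_matches(message, cache_keys, n=1, cutoff=threshold)
    return matches[0] if matches else None

print(get_closest_cache_key("helo", cache_keys))    # "hello" (ratio ~0.89)
print(get_closest_cache_key("bonds?", cache_keys))  # None: nothing clears 0.7
```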
@@ -176,14 +183,23 @@ def chat_with_model(user_input, history=None):
 
         # Construct prompt
         full_prompt = prompt_prefix + user_input + "\nA:"
-
+        try:
+            inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
+        except Exception as e:
+            logger.error(f"Error tokenizing input: {e}")
+            response = f"Error: Failed to process input: {str(e)}"
+            logger.info(f"Chatbot response: {response}")
+            history = history or []
+            history.append({"role": "user", "content": user_input})
+            history.append({"role": "assistant", "content": response})
+            return response, history
 
         # Generate response
-        with torch.
+        with torch.inference_mode():
             outputs = model.generate(
                 **inputs,
-                max_new_tokens=
-                min_length=
+                max_new_tokens=30,
+                min_length=10,
                 do_sample=True,
                 temperature=0.7,
                 top_p=0.9,
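The `torch.inference_mode()` context and the tightened token budget are the latency levers here. A self-contained sketch of the same generation call, assuming a small causal LM such as distilgpt2 (the Space's actual checkpoint isn't shown in this diff):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# distilgpt2 is an assumption for illustration, not the Space's model.
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

inputs = tokenizer("Q: What is an ETF?\nA:", return_tensors="pt")

# inference_mode() disables autograd tracking entirely, which is a bit
# cheaper than no_grad() on a CPU-only Space.
with torch.inference_mode():
    outputs = model.generate(
        **inputs,
        max_new_tokens=30,   # short cap keeps CPU latency bounded
        min_length=10,
        do_sample=True,      # sample instead of greedy decoding
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
    )

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```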
@@ -198,7 +214,7 @@ def chat_with_model(user_input, history=None):
        response_cache[cache_key] = response
        try:
            with open(cache_file, 'w') as f:
-               json.dump(response_cache, f)
+               json.dump(response_cache, f, indent=2)
            logger.info("Updated cache.json")
        except Exception as e:
            logger.warning(f"Failed to update cache.json: {e}")