AnilNiraula committed on
Commit
0aaa9e8
·
verified ·
1 Parent(s): da15462

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -91
app.py CHANGED
@@ -1,94 +1,14 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
2
- from transformers import BitsAndBytesConfig
3
- import torch
4
- import gradio as gr
5
 
6
- # Define device
7
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
8
 
9
- # Response cache
10
- response_cache = {
11
- "Hi, pretend you are a financial advisor. Now tell me how can I start investing in stock market?": (
12
- "As a financial advisor, here’s a guide to start investing in the stock market:\n"
13
- "1. **Learn**: Use Investopedia or “The Intelligent Investor” by Benjamin Graham.\n"
14
- "2. **Goals**: Set objectives (e.g., retirement) and assess risk tolerance.\n"
15
- "3. **Brokerage**: Choose Fidelity (low fees), Vanguard (index funds like VTI), or Robinhood (commission-free).\n"
16
- "4. **Investments**: Start with ETFs (e.g., VOO for S&P 500) or mutual funds.\n"
17
- "5. **Strategy**: Use dollar-cost averaging with $100-$500 monthly.\n"
18
- "6. **Risks**: Diversify and monitor.\n"
19
- "Consult a certified financial planner."
20
- ),
21
- "do you have a list of companies you recommend?": (
22
- "I cannot recommend specific companies without current market data. Instead, consider ETFs like VOO (S&P 500) or QQQ (tech-focused) for broad exposure. "
23
- "For individual stocks, research sectors like technology (e.g., Apple, Microsoft) or consumer goods (e.g., Procter & Gamble) using Yahoo Finance or Morningstar. "
24
- "Always consult a certified financial planner."
25
- )
26
- }
27
-
28
- # Load model
29
- model_name = "facebook/opt-1.3B"
30
- try:
31
- tokenizer = AutoTokenizer.from_pretrained(model_name, clean_up_tokenization_spaces=False)
32
- model = AutoModelForCausalLM.from_pretrained(
33
- model_name,
34
- device_map="auto",
35
- torch_dtype=torch.float16,
36
- quantization_config=BitsAndBytesConfig(load_in_8bit=True)
37
- ).to(device)
38
- except Exception as e:
39
- model_name = "facebook/opt-350m"
40
- tokenizer = AutoTokenizer.from_pretrained(model_name, clean_up_tokenization_spaces=False)
41
  model = AutoModelForCausalLM.from_pretrained(
42
- model_name,
43
- device_map="auto",
44
- torch_dtype=torch.float16,
45
- quantization_config=BitsAndBytesConfig(load_in_8bit=True)
46
- ).to(device)
47
-
48
- # Define chat function
49
- def chat_with_model(message, history):
50
- try:
51
- if not isinstance(message, str):
52
- return "Error: User input must be a string"
53
- if message in response_cache:
54
- return response_cache[message]
55
- full_prompt = (
56
- "You are a financial advisor with expertise in stock market investments. "
57
- "Provide accurate, detailed, and actionable advice. "
58
- "If you cannot provide specific recommendations (e.g., individual companies), "
59
- "explain why and offer general guidance or alternative suggestions instead.\n"
60
- )
61
- history = history[-3:] if history else [] # Limit history
62
- for user_msg, assistant_msg in history or []:
63
- if user_msg:
64
- full_prompt += f"User: {user_msg}\n"
65
- if assistant_msg:
66
- full_prompt += f"Assistant: {assistant_msg}\n"
67
- full_prompt += f"User: {message}\nAssistant:"
68
- inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
69
- outputs = model.generate(
70
- **inputs,
71
- max_new_tokens=150,
72
- do_sample=True,
73
- top_p=0.9,
74
- temperature=0.5,
75
- pad_token_id=tokenizer.eos_token_id
76
- )
77
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
78
- return response[len(full_prompt):].strip() if response.startswith(full_prompt) else response
79
- except Exception as e:
80
- return f"Error generating response: {str(e)}"
81
-
82
- # Create Gradio interface
83
- interface = gr.ChatInterface(
84
- fn=chat_with_model,
85
- title="Financial Advisor Chatbot (OPT-1.3B)",
86
- description="Ask for advice on starting to invest in the stock market! Powered by Meta AI's OPT-1.3B.",
87
- examples=[
88
- "Hi, pretend you are a financial advisor. Now tell me how can I start investing in stock market?",
89
- "Do you have a list of companies you recommend?"
90
- ]
91
- )
92
-
93
- # Launch interface
94
- interface.launch()
 
1
+ RuntimeError: No GPU found. A GPU is needed for quantization.
 
 
 
2
 
3
+ During handling of the above exception, another exception occurred:
 
4
 
5
+ Traceback (most recent call last):
6
+ File "/home/user/app/app.py", line 41, in <module>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  model = AutoModelForCausalLM.from_pretrained(
8
+ File "/usr/local/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py", line 564, in from_pretrained
9
+ return model_class.from_pretrained(
10
+ File "/usr/local/lib/python3.10/site-packages/transformers/modeling_utils.py", line 3398, in from_pretrained
11
+ hf_quantizer.validate_environment(
12
+ File "/usr/local/lib/python3.10/site-packages/transformers/quantizers/quantizer_bnb_8bit.py", line 62, in validate_environment
13
+ raise RuntimeError("No GPU found. A GPU is needed for quantization.")
14
+ RuntimeError: No GPU found. A GPU is needed for quantization.