Update app.py
Browse files
app.py
CHANGED
@@ -1,82 +1,91 @@
|
|
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
from openai import OpenAI
|
3 |
import os
|
4 |
|
5 |
-
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
# HF Inference router exposed through the OpenAI-compatible client.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    # NOTE(review): ACCESS_TOKEN is defined earlier in the file (not in this view) —
    # presumably an HF token from the environment; confirm.
    api_key=ACCESS_TOKEN,
)
print("OpenAI client initialized.")
|
13 |
-
|
14 |
-
def respond(
    message,
    history: list[tuple[str, str]]
):
    """Stream a chat completion for *message* given the prior *history*.

    Yields the accumulated response text after every streamed token so the
    Gradio ChatInterface can render the answer incrementally.
    """
    print(f"Received message: {message}")
    print(f"History: {history}")

    # Hardcoded system message and other parameters
    system_message = "You are a cryptocurrency trading assistant and market analyst. Your role is to provide users with data-driven insights, technical analysis (RSI, MACD, Bollinger Bands, Moving Averages, Fibonacci retracements, volume analysis, and price action), and investment advice tailored to their risk tolerance. Focus on actionable information, such as market conditions, key indicators, and investment strategies. Avoid speculation and provide clear, concise, and unbiased recommendations based on current data."
    max_tokens = 512
    temperature = 0.7
    top_p = 0.95
    frequency_penalty = 0.0
    seed = -1  # Random seed

    # Convert seed to None if -1 (meaning random)
    if seed == -1:
        seed = None

    messages = [{"role": "system", "content": system_message}]
    print("Initial messages array constructed.")

    # Add conversation history to the context
    for user_part, assistant_part in history:
        if user_part:
            messages.append({"role": "user", "content": user_part})
            print(f"Added user message to context: {user_part}")
        if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
            print(f"Added assistant message to context: {assistant_part}")

    # Append the latest user message
    messages.append({"role": "user", "content": message})
    print("Latest user message appended.")

    # Set the model to "meta" by default
    model_to_use = "meta-llama/Llama-3.3-70B-Instruct"
    print(f"Model selected for inference: {model_to_use}")

    # Start with an empty string to build the response as tokens stream in
    response = ""
    print("Sending request to OpenAI API.")

    for message_chunk in client.chat.completions.create(
        model=model_to_use,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        seed=seed,
        messages=messages,
    ):
        token_text = message_chunk.choices[0].delta.content
        print(f"Received token: {token_text}")
        # BUGFIX: role-only / final stream chunks carry delta.content = None;
        # concatenating None to str raised TypeError. Skip empty deltas.
        if token_text:
            response += token_text
            yield response

    print("Completed response generation.")
|
75 |
-
|
76 |
-
# GRADIO UI
|
77 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Start chatting!", likeable=True, layout="panel")
|
79 |
-
print("Chatbot interface created.")
|
80 |
|
81 |
demo = gr.ChatInterface(
|
82 |
fn=respond,
|
@@ -85,14 +94,10 @@ demo = gr.ChatInterface(
|
|
85 |
chatbot=chatbot,
|
86 |
theme="Nymbo/Nymbo_Theme",
|
87 |
)
|
88 |
-
print("ChatInterface object created.")
|
89 |
|
90 |
with demo:
|
91 |
# No need for system message input, model selection, or sliders
|
92 |
pass
|
93 |
|
94 |
-
print("Gradio interface initialized.")
|
95 |
-
|
96 |
if __name__ == "__main__":
|
97 |
-
print("Launching the demo application.")
|
98 |
demo.launch()
|
|
|
1 |
+
import requests
|
2 |
+
import matplotlib.pyplot as plt
|
3 |
+
import datetime
|
4 |
import gradio as gr
|
5 |
from openai import OpenAI
|
6 |
import os
|
7 |
|
8 |
+
# Coingecko API Base URL
# All endpoint helpers below build their request URLs relative to this prefix.
BASE_URL = "https://api.coingecko.com/api/v3/"
|
10 |
+
|
11 |
+
# Fetching coin data from the CoinGecko API
|
12 |
+
def get_coin_list(currency="usd"):
    """Fetch the top-100 coins by market cap from CoinGecko.

    Returns the decoded JSON payload (a list of market dicts on success).
    """
    url = f"{BASE_URL}coins/markets?vs_currency={currency}&order=market_cap_desc&per_page=100&page=1&sparkline=false"
    # BUGFIX: requests has no default timeout — a stalled connection would
    # hang the app forever. 10 s is generous for this endpoint.
    response = requests.get(url, timeout=10)
    return response.json()
|
16 |
+
|
17 |
+
def get_single_coin(id, currency="usd"):
    """Fetch full CoinGecko metadata for one coin *id* (e.g. "bitcoin").

    Returns the decoded JSON payload. Note: *currency* is accepted for
    interface symmetry with the other helpers but this endpoint does not
    use it.
    """
    url = f"{BASE_URL}coins/{id}"
    # BUGFIX: add a timeout so a stalled connection cannot hang the app.
    response = requests.get(url, timeout=10)
    return response.json()
|
21 |
|
22 |
+
def get_historical_chart(id, days=365, currency="usd"):
    """Fetch *days* of market-chart data (prices, caps, volumes) for *id*.

    Returns the decoded JSON payload; the 'prices' key holds
    [[timestamp_ms, price], ...] pairs.
    """
    url = f"{BASE_URL}coins/{id}/market_chart?vs_currency={currency}&days={days}"
    # BUGFIX: add a timeout so a stalled connection cannot hang the app.
    response = requests.get(url, timeout=10)
    return response.json()
|
26 |
+
|
27 |
+
# OpenAI API key and client initialization (HF Inference OpenAI-compatible endpoint).
# NOTE(review): ACCESS_TOKEN is None when HF_TOKEN is unset — requests would then
# fail with an auth error at call time; confirm the deployment defines the secret.
ACCESS_TOKEN = os.getenv("HF_TOKEN")
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
|
34 |
+
# Hardcoded system message and other parameters
# Module-level LLM generation settings.
system_message = "You are a cryptocurrency trading assistant and market analyst. Your role is to provide users with data-driven insights, technical analysis (RSI, MACD, Bollinger Bands, Moving Averages, Fibonacci retracements, volume analysis, and price action), and investment advice tailored to their risk tolerance. Focus on actionable information, such as market conditions, key indicators, and investment strategies. Avoid speculation and provide clear, concise, and unbiased recommendations based on current data."
max_tokens = 1024
temperature = 0.3
top_p = 0.95
frequency_penalty = 0.0
seed = -1  # Random seed
# NOTE(review): none of these settings are referenced by the respond() visible in
# this view — confirm they are still consumed somewhere before keeping them.
|
41 |
+
|
42 |
+
def respond(message, history: list[tuple[str, str]]):
    """Chat handler: look up the coin named in *message* and answer with a
    data summary plus a saved price chart.

    *history* is unused here but required by the gr.ChatInterface fn
    signature. Returns (text, chart_path); chart_path is None when no coin
    matches.
    """
    # Fetch the coin's market data
    coin_data = search_coin_by_name(message)
    if coin_data:
        # Build the textual answer from the market data
        llm_msg = llm_response(coin_data, message)

        # BUGFIX: the chart endpoint needs the canonical CoinGecko id, not the
        # raw user message (which may be a partial name like "bitc").
        chart_image = plot_coin_chart(coin_data['id'])

        return llm_msg, chart_image
    else:
        return "Coin bulunamadı.", None
|
55 |
+
|
56 |
+
def search_coin_by_name(name, currency="usd"):
    """Return the first top-100 market entry whose name or id contains
    *name* (case-insensitive), or None when nothing matches."""
    needle = name.lower()
    hits = (
        entry
        for entry in get_coin_list(currency)
        if needle in entry['name'].lower() or needle in entry['id'].lower()
    )
    return next(hits, None)
|
62 |
+
|
63 |
+
def llm_response(coin_data, coin_name):
    """Format a short Turkish summary (price, market cap, 24h change) for a
    coin from its CoinGecko market dict."""
    parts = [
        f"{coin_name} ile ilgili bilgiler:\n",
        f"Fiyat: {coin_data['current_price']} USD\n",
        f"Piyasa Değeri: {coin_data['market_cap']} USD\n",
        f"24 Saatlik Değişim: {coin_data['price_change_percentage_24h']}%\n",
    ]
    return "".join(parts)
|
70 |
+
|
71 |
+
def plot_coin_chart(id, days=30, currency="usd"):
    """Plot the last *days* of prices for coin *id* and save the chart.

    Returns the path of the saved PNG ("coin_chart.png").
    """
    historical_data = get_historical_chart(id, days, currency)
    # CoinGecko returns 'prices' as [[timestamp_ms, price], ...]
    prices = historical_data['prices']
    # BUGFIX: datetime.utcfromtimestamp is deprecated since Python 3.12 —
    # build timezone-aware UTC datetimes instead.
    timestamps = [
        datetime.datetime.fromtimestamp(point[0] / 1000, tz=datetime.timezone.utc)
        for point in prices
    ]
    price_values = [point[1] for point in prices]

    plt.figure(figsize=(10, 5))
    plt.plot(timestamps, price_values)
    plt.title(f"{id} Coin {days} Günlük Fiyat Grafiği")
    plt.xlabel('Tarih')
    plt.ylabel(f'{currency.upper()} Fiyatı')
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.savefig("coin_chart.png")  # Saving the chart to disk
    # BUGFIX: close the figure — without this every request leaks an open
    # figure (matplotlib warns after 20; memory grows unbounded).
    plt.close()
    return "coin_chart.png"  # Returning the chart's path
|
86 |
+
|
87 |
+
# Gradio UI
# Chat display component wired into the ChatInterface below.
# NOTE(review): `likeable` is deprecated/removed in newer Gradio releases — confirm version.
chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="Start chatting!", likeable=True, layout="panel")
|
|
|
89 |
|
90 |
demo = gr.ChatInterface(
|
91 |
fn=respond,
|
|
|
94 |
chatbot=chatbot,
|
95 |
theme="Nymbo/Nymbo_Theme",
|
96 |
)
|
|
|
97 |
|
98 |
# The interface needs no extra controls; the `with` block is intentionally empty.
with demo:
    # No need for system message input, model selection, or sliders
    pass

if __name__ == "__main__":
    # Launch the Gradio app when run as a script.
    demo.launch()
|