import os
from datetime import timedelta

import numpy as np
import pandas as pd
import ta
from binance.client import Client
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

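# For every USDT pair trading on Binance, this script downloads and caches
# 4-hour klines, computes technical indicators, trains a RandomForestClassifier
# to predict the Ichimoku-cloud trend label, logs a classification report and
# the latest prediction, and grid-searches take-profit/stop-loss levels.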


def log_results(message, filename="predictions_results.txt"):
    print(message)
    with open(filename, "a") as f:
        f.write(message + "\n")


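# Public market-data endpoints (exchange info, historical klines) work without API keys.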
client = Client()

interval = Client.KLINE_INTERVAL_4HOUR
result_file = "predictions_results.txt"

# Start a fresh results file each run ("w" truncates any previous file).
with open(result_file, "w") as f:
    f.write("Asset,Time,Price,Prediction,Optimal_UP_TP,Optimal_UP_SL,Optimal_DN_TP,Optimal_DN_SL\n")

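# Every pair that is actively trading against USDT.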
symbols = [s['symbol'] for s in client.get_exchange_info()['symbols']
           if s['status'] == 'TRADING' and s['quoteAsset'] == 'USDT']


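# Grid-search take-profit/stop-loss pairs for one side of the market
# (side=1: long on uptrend signals, side=0: short on downtrend signals).
# Each signal bar is walked forward up to 10 bars; the first TP or SL hit
# books a return whose magnitude is clipped to max(tp, sl), and trades that
# hit neither within the window are ignored. Returns the (tp, sl, avg_return)
# combination with the best average booked return.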
def optimize_tp_sl(df, signals, side, pgrid, lgrid):
    best = (0, 0, -np.inf)
    prices = df['close'].values
    idxs = np.where(signals == side)[0]
    for tp in pgrid:
        for sl in lgrid:
            rets = []
            for i in idxs:
                entry = prices[i]
                for j in range(i + 1, min(i + 11, len(prices))):
                    ret = (prices[j] - entry) / entry if side == 1 else (entry - prices[j]) / entry
                    if ret >= tp or ret <= -sl:
                        rets.append(np.sign(ret) * min(abs(ret), max(tp, sl)))
                        break
            if rets:
                avg_ret = np.mean(rets)
                if avg_ret > best[2]:
                    best = (tp, sl, avg_ret)
    return best


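# Per-symbol pipeline: refresh data, build features, train, report, predict,
# and optimise TP/SL levels.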
for symbol in symbols:
    log_results(f"=== {symbol} ===", result_file)

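    # Load or incrementally update the per-symbol 4h candle cache
    # (full history from December 2021 on the first run).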
    data_file = f"{symbol}_data_4h_full.csv"
    if os.path.exists(data_file):
        df = pd.read_csv(data_file, index_col=0, parse_dates=True)
        last_ts = df.index[-1]
        start = (last_ts + timedelta(hours=4)).strftime("%d %B %Y %H:%M:%S")
        new = client.get_historical_klines(symbol, interval, start)
        if new:
            new_df = pd.DataFrame(new, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume',
                                                'close_time', 'quote_av', 'trades', 'tb_base_av',
                                                'tb_quote_av', 'ignore'])
            new_df = new_df[['timestamp', 'open', 'high', 'low', 'close', 'volume']].astype(float)
            new_df['timestamp'] = pd.to_datetime(new_df['timestamp'], unit='ms')
            new_df.set_index('timestamp', inplace=True)
            # Deduplicate on the index so a re-fetched candle replaces the stale
            # cached copy instead of surviving alongside it.
            df = pd.concat([df, new_df])
            df = df[~df.index.duplicated(keep='last')]
            df.to_csv(data_file)
    else:
        klines = client.get_historical_klines(symbol, interval, "01 December 2021")
        df = pd.DataFrame(klines, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume',
                                           'close_time', 'quote_av', 'trades', 'tb_base_av',
                                           'tb_quote_av', 'ignore'])
        df = df[['timestamp', 'open', 'high', 'low', 'close', 'volume']].astype(float)
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
        df.set_index('timestamp', inplace=True)
        df.to_csv(data_file)

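    # Feature engineering: momentum, trend and volatility indicators from `ta`,
    # plus EMAs/SMAs and the Ichimoku spans used later for labelling.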
    df['rsi'] = ta.momentum.RSIIndicator(df['close'], window=14).rsi()
    df['macd'] = ta.trend.MACD(df['close']).macd()
    for s in [10, 20, 50, 100]:
        df[f'ema_{s}'] = df['close'].ewm(span=s).mean()
    for w in [10, 20, 50, 100]:
        df[f'sma_{w}'] = df['close'].rolling(window=w).mean()
    bb = ta.volatility.BollingerBands(df['close'], window=20, window_dev=2)
    df['bbw'] = (bb.bollinger_hband() - bb.bollinger_lband()) / bb.bollinger_mavg()
    df['atr'] = ta.volatility.AverageTrueRange(df['high'], df['low'], df['close'], window=14).average_true_range()
    df['adx'] = ta.trend.ADXIndicator(df['high'], df['low'], df['close'], window=14).adx()
    st = ta.momentum.StochasticOscillator(df['high'], df['low'], df['close'], window=14)
    df['st_k'] = st.stoch()
    df['st_d'] = st.stoch_signal()
    df['wr'] = ta.momentum.WilliamsRIndicator(df['high'], df['low'], df['close'], lbp=14).williams_r()
    df['cci'] = ta.trend.CCIIndicator(df['high'], df['low'], df['close'], window=20).cci()
    df['mom'] = df['close'] - df['close'].shift(10)
    ichi = ta.trend.IchimokuIndicator(df['high'], df['low'], window1=9, window2=26, window3=52)
    df['span_a'] = ichi.ichimoku_a()
    df['span_b'] = ichi.ichimoku_b()
    df.dropna(inplace=True)

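    # Label each bar by its position relative to the Ichimoku cloud:
    # 1 = close above both spans, 0 = close below both spans, -1 = inside the cloud.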
    df['signal'] = np.select([
        (df['close'] > df['span_a']) & (df['close'] > df['span_b']),
        (df['close'] < df['span_a']) & (df['close'] < df['span_b'])
    ], [1, 0], default=-1)

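    # Chronological 80/20 split (shuffle=False) so the held-out test set always
    # follows the training data in time.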
    features = [c for c in df.columns if c not in ['open', 'high', 'low', 'close', 'volume', 'signal']]
    X, y = df[features], df['signal']
    Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, shuffle=False)
    model = RandomForestClassifier(n_estimators=200, class_weight='balanced', random_state=42)
    model.fit(Xtr, ytr)
    ypr = model.predict(Xte)

    report = classification_report(yte, ypr, zero_division=0)
    log_results(f"Classification report for {symbol}:\n{report}", result_file)

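    # Predict the trend class for the most recent available candle.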
    latest_df = X.iloc[-1:]
    trend_label = model.predict(latest_df)[0]
    pred_time = df.index[-1]
    pred_price = df['close'].iloc[-1]
    trend_str = {1: 'Uptrend', 0: 'Downtrend', -1: 'Neutral'}[trend_label]
    log_results(f"Time: {pred_time}, Price: {pred_price:.2f}, Prediction: {trend_str}", result_file)

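    # Grid-search TP/SL levels from 1% to 9% (1% steps) over the model's
    # predictions for the full history (note: largely in-sample).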
    hist_sign = model.predict(X)
    pgrid = np.arange(0.01, 0.1, 0.01)
    lgrid = np.arange(0.01, 0.1, 0.01)
    up_tp, up_sl, _ = optimize_tp_sl(df, hist_sign, 1, pgrid, lgrid)
    dn_tp, dn_sl, _ = optimize_tp_sl(df, hist_sign, 0, pgrid, lgrid)
    log_results(f"Optimal UP TP/SL: +{up_tp*100:.1f}% / -{up_sl*100:.1f}%", result_file)
    log_results(f"Optimal DN TP/SL: +{dn_tp*100:.1f}% / -{dn_sl*100:.1f}%", result_file)

    with open(result_file, "a") as f:
        f.write("\n")


log_results("All assets processed.", result_file)