solanaexpert committed
Commit 1e81e10 · verified · 1 Parent(s): e0ff5fb

Update MLCryptoForecasterAllAssetsTPSL.py

Files changed (1)
  1. MLCryptoForecasterAllAssetsTPSL.py +78 -73
MLCryptoForecasterAllAssetsTPSL.py CHANGED
@@ -8,117 +8,122 @@ from sklearn.ensemble import RandomForestClassifier
  from sklearn.metrics import classification_report
  import ta
 
- # Initialize Binance client (insert API keys if needed)
  client = Client()
 
  # Settings
  interval = Client.KLINE_INTERVAL_4HOUR
  symbols = [s['symbol'] for s in client.get_exchange_info()['symbols']
             if s['status']=='TRADING' and s['quoteAsset']=='USDT']
 
- def process_symbol(symbol):
-     data_file = f"{symbol}_data_4h_full.csv"
      # Load or download data
      if os.path.exists(data_file):
          df = pd.read_csv(data_file, index_col=0, parse_dates=True)
-         # Normalize volume column name
-         if 'volume' in df.columns:
-             df.rename(columns={'volume':'vol'}, inplace=True)
          last_ts = df.index[-1]
          start = (last_ts + timedelta(hours=4)).strftime("%d %B %Y %H:%M:%S")
          new = client.get_historical_klines(symbol, interval, start)
          if new:
-             new_df = pd.DataFrame(new, columns=['ts','open','high','low','close','vol',
-                                                 'close_time','quote_av','trades','tb_base_av','tb_quote_av','ignore'])
-             new_df = new_df[['ts','open','high','low','close','vol']].astype({k:float for k in ['open','high','low','close','vol']})
-             new_df['ts'] = pd.to_datetime(new_df['ts'], unit='ms')
-             new_df.set_index('ts', inplace=True)
              df = pd.concat([df, new_df]).drop_duplicates()
              df.to_csv(data_file)
      else:
-         klines = client.get_historical_klines(symbol, interval, "01 Dec 2021")
-         df = pd.DataFrame(klines, columns=['ts','open','high','low','close','vol',
                                             'close_time','quote_av','trades','tb_base_av','tb_quote_av','ignore'])
-         df = df[['ts','open','high','low','close','vol']].astype({k:float for k in ['open','high','low','close','vol']})
-         df['ts'] = pd.to_datetime(df['ts'], unit='ms')
-         df.set_index('ts', inplace=True)
          df.to_csv(data_file)
 
-     # Standardize volume if still present as 'volume'
-     if 'volume' in df.columns:
-         df.rename(columns={'volume':'vol'}, inplace=True)
-
-     # Feature Engineering
      df['rsi'] = ta.momentum.RSIIndicator(df['close'], window=14).rsi()
      df['macd'] = ta.trend.MACD(df['close']).macd()
-     for s in [10, 20, 50, 100]:
-         df[f'ema_{s}'] = df['close'].ewm(span=s).mean()
-     for w in [10, 20, 50, 100]:
-         df[f'sma_{w}'] = df['close'].rolling(window=w).mean()
      bb = ta.volatility.BollingerBands(df['close'], window=20, window_dev=2)
      df['bbw'] = (bb.bollinger_hband() - bb.bollinger_lband()) / bb.bollinger_mavg()
      df['atr'] = ta.volatility.AverageTrueRange(df['high'], df['low'], df['close'], window=14).average_true_range()
      df['adx'] = ta.trend.ADXIndicator(df['high'], df['low'], df['close'], window=14).adx()
      st = ta.momentum.StochasticOscillator(df['high'], df['low'], df['close'], window=14)
-     df['st_k'] = st.stoch()
-     df['st_d'] = st.stoch_signal()
      df['wr'] = ta.momentum.WilliamsRIndicator(df['high'], df['low'], df['close'], lbp=14).williams_r()
      df['cci'] = ta.trend.CCIIndicator(df['high'], df['low'], df['close'], window=20).cci()
      df['mom'] = df['close'] - df['close'].shift(10)
      ichi = ta.trend.IchimokuIndicator(df['high'], df['low'], window1=9, window2=26, window3=52)
-     df['span_a'] = ichi.ichimoku_a()
-     df['span_b'] = ichi.ichimoku_b()
      df.dropna(inplace=True)
 
-     # Trend labels
-     df['signal'] = np.select(
-         [(df['close'] > df['span_a']) & (df['close'] > df['span_b']),
-          (df['close'] < df['span_a']) & (df['close'] < df['span_b'])],
-         [1, 0],
-         default=-1
-     )
 
-     # Train/Test
-     features = df.drop(columns=['open', 'high', 'low', 'close', 'vol', 'signal']).columns
      X, y = df[features], df['signal']
      Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, shuffle=False)
-     mdl = RandomForestClassifier(n_estimators=200, class_weight='balanced', random_state=42)
-     mdl.fit(Xtr, ytr)
-     yp = mdl.predict(Xte)
-     print(f"=== {symbol} ===")
-     print(classification_report(yte, yp, zero_division=0))
 
-     # Backtest for optimal TP/SL
-     def optimize_tp_sl(df, signals, side, pgrid, lgrid):
-         best = (0, 0, -np.inf)
-         prices = df['close'].values
-         idxs = np.where(signals == side)[0]
-         for tp in pgrid:
-             for sl in lgrid:
-                 rets = []
-                 for i in idxs:
-                     entry = prices[i]
-                     for j in range(i+1, min(i+11, len(prices))):
-                         ret = ((prices[j] - entry) / entry) if side == 1 else ((entry - prices[j]) / entry)
-                         if ret >= tp or ret <= -sl:
-                             rets.append(np.sign(ret) * min(abs(ret), max(tp, sl)))
-                             break
-                 if rets:
-                     avg_ret = np.mean(rets)
-                     if avg_ret > best[2]:
-                         best = (tp, sl, avg_ret)
-         return best
 
-     hist_signals = pd.Series(mdl.predict(X), index=X.index)
-     pgrid = np.arange(0.01, 0.1, 0.01)
-     lgrid = np.arange(0.01, 0.1, 0.01)
-     up_tp, up_sl, _ = optimize_tp_sl(df, hist_signals.values, 1, pgrid, lgrid)
-     dn_tp, dn_sl, _ = optimize_tp_sl(df, hist_signals.values, 0, pgrid, lgrid)
-     print(f"Optimal UP TP/SL: +{up_tp*100:.1f}% / -{up_sl*100:.1f}%")
-     print(f"Optimal DN TP/SL: +{dn_tp*100:.1f}% / -{dn_sl*100:.1f}%")
 
  for sym in symbols:
-     try:
-         process_symbol(sym)
      except Exception as e:
-         print(f"Error {sym}: {e}")
 
  from sklearn.metrics import classification_report
  import ta
 
+ # Function to log results to both console and file
+ def log_results(message, filename="predictions_results.txt"):
+     print(message)
+     with open(filename, "a") as f:
+         f.write(message + "\n")
+
+ # Initialize Binance client
  client = Client()
 
  # Settings
  interval = Client.KLINE_INTERVAL_4HOUR
+ result_file = "predictions_results.txt"
+ # Initialize result file
+ if not os.path.exists(result_file):
+     with open(result_file, "w") as f:
+         f.write("Asset,Accuracy,Optimal_UP_TP,Optimal_UP_SL,Optimal_DN_TP,Optimal_DN_SL\n")
+
  symbols = [s['symbol'] for s in client.get_exchange_info()['symbols']
             if s['status']=='TRADING' and s['quoteAsset']=='USDT']
 
+ def optimize_tp_sl(df, signals, side, pgrid, lgrid):
+     best = (0, 0, -np.inf)
+     prices = df['close'].values
+     idxs = np.where(signals == side)[0]
+     for tp in pgrid:
+         for sl in lgrid:
+             rets = []
+             for i in idxs:
+                 entry = prices[i]
+                 for j in range(i+1, min(i+11, len(prices))):
+                     if side == 1:
+                         ret = (prices[j] - entry) / entry
+                     else:
+                         ret = (entry - prices[j]) / entry
+                     if ret >= tp or ret <= -sl:
+                         rets.append(np.sign(ret) * min(abs(ret), max(tp, sl)))
+                         break
+             if rets:
+                 avg_ret = np.mean(rets)
+                 if avg_ret > best[2]:
+                     best = (tp, sl, avg_ret)
+     return best
+
+ def process_symbol(symbol):
+     log_results(f"=== {symbol} ===", result_file)
      # Load or download data
+     data_file = f"{symbol}_data_4h_full.csv"
      if os.path.exists(data_file):
          df = pd.read_csv(data_file, index_col=0, parse_dates=True)
          last_ts = df.index[-1]
          start = (last_ts + timedelta(hours=4)).strftime("%d %B %Y %H:%M:%S")
          new = client.get_historical_klines(symbol, interval, start)
          if new:
+             new_df = pd.DataFrame(new, columns=['timestamp','open','high','low','close','volume',
+                                                 'close_time','quote_av','trades','tb_base_av','tb_quote_av','ignore'])
+             new_df = new_df[['timestamp','open','high','low','close','volume']].astype(float)
+             new_df['timestamp'] = pd.to_datetime(new_df['timestamp'], unit='ms')
+             new_df.set_index('timestamp', inplace=True)
              df = pd.concat([df, new_df]).drop_duplicates()
              df.to_csv(data_file)
      else:
+         klines = client.get_historical_klines(symbol, interval, "01 December 2021")
+         df = pd.DataFrame(klines, columns=['timestamp','open','high','low','close','volume',
                                             'close_time','quote_av','trades','tb_base_av','tb_quote_av','ignore'])
+         df = df[['timestamp','open','high','low','close','volume']].astype(float)
+         df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
+         df.set_index('timestamp', inplace=True)
          df.to_csv(data_file)
 
+     # Indicators
      df['rsi'] = ta.momentum.RSIIndicator(df['close'], window=14).rsi()
      df['macd'] = ta.trend.MACD(df['close']).macd()
+     for s in [10, 20, 50, 100]: df[f'ema_{s}'] = df['close'].ewm(span=s).mean()
+     for w in [10, 20, 50, 100]: df[f'sma_{w}'] = df['close'].rolling(window=w).mean()
      bb = ta.volatility.BollingerBands(df['close'], window=20, window_dev=2)
      df['bbw'] = (bb.bollinger_hband() - bb.bollinger_lband()) / bb.bollinger_mavg()
      df['atr'] = ta.volatility.AverageTrueRange(df['high'], df['low'], df['close'], window=14).average_true_range()
      df['adx'] = ta.trend.ADXIndicator(df['high'], df['low'], df['close'], window=14).adx()
      st = ta.momentum.StochasticOscillator(df['high'], df['low'], df['close'], window=14)
+     df['st_k'] = st.stoch(); df['st_d'] = st.stoch_signal()
      df['wr'] = ta.momentum.WilliamsRIndicator(df['high'], df['low'], df['close'], lbp=14).williams_r()
      df['cci'] = ta.trend.CCIIndicator(df['high'], df['low'], df['close'], window=20).cci()
      df['mom'] = df['close'] - df['close'].shift(10)
      ichi = ta.trend.IchimokuIndicator(df['high'], df['low'], window1=9, window2=26, window3=52)
+     df['span_a'] = ichi.ichimoku_a(); df['span_b'] = ichi.ichimoku_b()
      df.dropna(inplace=True)
 
+     # Signal labeling
+     df['signal'] = np.select([
+         (df['close'] > df['span_a']) & (df['close'] > df['span_b']),
+         (df['close'] < df['span_a']) & (df['close'] < df['span_b'])], [1, 0], default=-1)
 
+     # Train/test
+     features = [c for c in df.columns if c not in ['open','high','low','close','volume','signal']]
      X, y = df[features], df['signal']
      Xtr, Xte, ytr, yte = train_test_split(X, y, test_size=0.2, shuffle=False)
+     model = RandomForestClassifier(n_estimators=200, class_weight='balanced', random_state=42)
+     model.fit(Xtr, ytr)
+     ypr = model.predict(Xte)
+     report = classification_report(yte, ypr, zero_division=0)
+     log_results(f"Classification report for {symbol}:\n{report}", result_file)
 
+     # Predict latest trend
+     latest = model.predict(X.iloc[-1:])[0]
+     trend_map = {1: 'Uptrend', 0: 'Downtrend', -1: 'Neutral'}
+     log_results(f"Predicted next trend for {symbol}: {trend_map[latest]}", result_file)
 
+     # Optimize TP/SL
+     hist_sign = model.predict(X)
+     pgrid = np.arange(0.01, 0.1, 0.01); lgrid = np.arange(0.01, 0.1, 0.01)
+     up_tp, up_sl, _ = optimize_tp_sl(df, hist_sign, 1, pgrid, lgrid)
+     dn_tp, dn_sl, _ = optimize_tp_sl(df, hist_sign, 0, pgrid, lgrid)
+     log_results(f"Optimal UP TP/SL: +{up_tp*100:.1f}% / -{up_sl*100:.1f}%", result_file)
+     log_results(f"Optimal DN TP/SL: +{dn_tp*100:.1f}% / -{dn_sl*100:.1f}%", result_file)
 
  for sym in symbols:
+     try: process_symbol(sym)
      except Exception as e:
+         log_results(f"Error processing {sym}: {e}", result_file)