yifanxie commited on
Commit
e863dee
·
1 Parent(s): 7818025

improve stake overview UI, and integrate FNCV3

Browse files
Files changed (2) hide show
  1. dash/numerdash_app.py +171 -121
  2. project_tools/numerapi_utils.py +47 -33
dash/numerdash_app.py CHANGED
@@ -98,7 +98,7 @@ def default_model_picker():
98
  return picked_models
99
 
100
 
101
- def model_fast_picker(models):
102
  text_content = '''
103
  fast model picker by CSV string.
104
  example: "model1, model2, model3"
@@ -109,7 +109,7 @@ def model_fast_picker(models):
109
  csv_parts = text.split(',')
110
  for s in csv_parts:
111
  m = s.strip()
112
- if m not in models:
113
  result_models.append(m)
114
  return list(dict.fromkeys(result_models))
115
 
@@ -127,10 +127,11 @@ def generate_round_table(data, row_cts, c, r, sortcol='corrmmc'):
127
  select_round = row_cts[c].slider('select a round', earliest_round, latest_round, suggest_round, 1)
128
  # row_cts[c].write(select_round)
129
  round_data = data[data['roundNumber']==select_round].sort_values(by=sortcol, ascending=False).reset_index(drop=True)
 
130
  # round_data = round_data[round_data['model'].isin(models)].reset_index(drop=True)
131
- latest_date = round_data['date'].values[0]
132
- row_cts[c].write(f'round: {select_round}, date: {latest_date}')
133
- row_cts[c].dataframe(round_data.drop(['roundNumber', 'date'], axis=1), height=max_table_height-100)
134
 
135
 
136
 
@@ -148,10 +149,10 @@ def generate_dailyscore_metrics(data, row_cts, c, r):
148
  pass
149
 
150
  def get_roundmetric_data(data):
151
- numfeats1 = ['corr', 'mmc', 'corrmmc', 'corr2mmc']
152
  stat1 = ['sum', 'mean', 'count',
153
  {'sharpe': project_utils.get_array_sharpe}] # {'ptp':np.ptp}]#{'sharp':project_utils.get_array_sharpe}]
154
- numfeats2 = ['corr_pct', 'mmc_pct', 'cmavg_pct', 'c2mavg_pct']
155
  stat2 = ['mean']#, {'sharp': project_utils.get_array_sharpe}]
156
 
157
  roundmetric_agg_rcp = [
@@ -175,19 +176,20 @@ def generate_round_metrics(data, row_cts, c, r):
175
  # st.write(data.columns.tolist())
176
  for col in data.columns.tolist():
177
  if select_metric =='corrmmc':
178
- if (f'{select_metric}_' in col) or ('cmavg_' in col):
179
  cols += [col]
180
- elif select_metric =='corr2mmc':
181
- if (f'{select_metric}_' in col) or ('c2mavg_' in col):
182
  cols += [col]
183
  else:
184
- if (f'{select_metric}_' in col) and (not('corrmmc' in col)) and (not('corr2mmc' in col)):
 
185
  cols+= [col]
186
 
187
  if select_metric != 'pct':
188
  sort_col = select_metric+'_sharpe'
189
  else:
190
- sort_col = 'cmavg_pct_mean'
191
  view_data = data[cols].sort_values(by=sort_col, ascending=False)
192
  row_cts[c].dataframe(view_data)
193
  pass
@@ -237,7 +239,7 @@ def round_view(data, select_perview, select_metric=None):
237
  generate_round_table(data, row_cts, c, r)
238
  if select_perview=='dailyscore_metric':
239
  generate_dailyscore_metrics(data, row_cts, c, r)
240
- if select_perview=='round_metric':
241
  generate_round_metrics(data, row_cts, c, r)
242
  if select_perview=='dailyscore_chart':
243
  dailyscore_chart(data, row_cts, c, r, select_metric)
@@ -246,26 +248,50 @@ def round_view(data, select_perview, select_metric=None):
246
 
247
 
248
  def score_overview():
249
- models = []
250
- data = []
251
- benchmark_opt = st.sidebar.checkbox('download default models', value=True)
252
-
253
- model_selection = st.empty()
254
- if benchmark_opt:
255
- model_dict = default_model_picker()
256
- for k in model_dict.keys():
257
- models += model_dict[k]
258
- models = models + model_fast_picker(models)
259
- # if len(models)>0:
260
- # model_selection = st.sidebar.multiselect('select models', models, default=models)
261
- st.sidebar.subheader('Choose a Table View')
262
- select_perview = st.sidebar.selectbox("", list(tbl_opt.keys()), index=0, format_func=lambda x: tbl_opt[x])
263
- if len(models)>0:
264
- model_selection.multiselect('selected models', models, default=models)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
265
 
266
 
267
  def data_operation():
268
  # top_lb, top_tp3m, top_tp1y, special_list = sidebar_data_picker()
 
269
  latest_round = project_utils.latest_round
270
  models = []
271
  benchmark_opt = st.sidebar.checkbox('download default models', value=True)
@@ -273,9 +299,9 @@ def data_operation():
273
  model_dict = default_model_picker()
274
  for k in model_dict.keys():
275
  models += model_dict[k]
276
- models = models + model_fast_picker(models)
277
  if len(models)>0:
278
- model_selection = st.multiselect('select models', models, default=models)
279
  suggest_min_round = 182 #latest_round-50
280
  min_round, max_round = st.slider('select tournament rounds', 200, latest_round, (suggest_min_round, latest_round), 1)
281
  roundlist = [i for i in range(max_round, min_round-1, -1)]
@@ -297,13 +323,16 @@ def data_operation():
297
  if prjreload:
298
  project_utils.reload_project()
299
  if len(model_df)>0:
300
- rename_dict = {'corrPercentile': 'corr_pct', 'correlation':'corr', 'corrWMetamodel':'corr_meta', 'mmcPercentile':'mmc_pct', 'tcPercentile':'tc_pct'}
301
  model_df.rename(columns=rename_dict, inplace=True)
302
  model_df['corrmmc'] = model_df['corr'] + model_df['mmc']
303
- model_df['corr2mmc'] = model_df['corr'] + 2*model_df['mmc']
304
- model_df['cmavg_pct'] = (model_df['corr_pct'] + model_df['mmc_pct'])/2
305
- model_df['c2mavg_pct'] = (model_df['corr_pct'] + 2*model_df['mmc_pct'])/3
306
- ord_cols = ['model','corr', 'corr_pct', 'mmc', 'mmc_pct', 'corrmmc', 'cmavg_pct', 'corr_meta', 'tc', 'tc_pct', 'corr2mmc','c2mavg_pct', 'roundNumber']
 
 
 
307
  model_df = model_df[ord_cols]
308
  if project_config.SAVE_LOCAL_COPY:
309
  project_utils.pickle_data(project_config.MODEL_ROUND_RESULT_FILE, model_df)
@@ -313,8 +342,9 @@ def data_operation():
313
  st.text('list of models being tracked')
314
  st.write(model_dict)
315
  try:
316
- st.write(st.session_state['model_data'].shape)
317
- st.write(model_df.head(5))
 
318
  except:
319
  st.write('model data was not retrieved')
320
 
@@ -373,6 +403,8 @@ def download_model_round_result(models, roundlist, show_info):
373
  time.sleep(0.1)
374
  my_bar.progress(percent_complete)
375
  model_df = pd.concat(model_dfs, axis=0).sort_values(by=['roundNumber'], ascending=False).reset_index(drop=True)
 
 
376
  model_df = model_df[model_df['roundNumber'].isin(roundlist)].reset_index(drop=True)
377
  return model_df
378
 
@@ -396,9 +428,6 @@ def roundresult_chart(data, model_selection):
396
  min_selectround, max_selectround = st.slider('select plotting round range', min_round, max_round,
397
  (suggest_min_round, max_round), 1)
398
 
399
- # min_selectround, max_selectround = placeholder.slider('select plotting round range', min_round, max_round,
400
- # (suggest_min_round, max_round), 1)
401
-
402
  select_metric = st.selectbox('Choose a metric', list(histtrend_opt.keys()), index=0,
403
  format_func=lambda x: histtrend_opt[x])
404
  round_range = [min_selectround, max_selectround]
@@ -409,17 +438,11 @@ def roundresult_chart(data, model_selection):
409
  mean_df['mean'] = mean_df[select_metric]
410
  merge_cols = ['model', 'model avg.', 'mean']
411
  round_data = round_data.merge(right=mean_df[merge_cols], on='model', how='left').sort_values(by=['mean','model', 'roundNumber'], ascending=False)
412
- fig = chart_pxline(round_data, 'roundNumber', y=select_metric, color='model avg.', hover_data=list(histtrend_opt.keys())+['date'],x_range=round_range)
413
-
414
  if fig is not None:
415
  st.plotly_chart(fig, use_container_width=True)
416
- dailyscore_data = data[(data['model'].isin(model_selection)) & data['roundNumber'].isin(round_list)].reset_index(drop=True)
417
- dailyscore_data = dailyscore_data.merge(right=mean_df[merge_cols], on='model', how='left').sort_values(
418
- by=['mean', 'model', 'roundNumber'], ascending=False)
419
- round_view(dailyscore_data, 'dailyscore_chart', select_metric)
420
 
421
- else:
422
- st.text(f'No data available for models: {models}')
423
 
424
 
425
 
@@ -427,22 +450,16 @@ def roundresult_chart(data, model_selection):
427
  def histtrend():
428
  # default_models = ['yxbot']
429
  # models = default_models.copy()
430
- models = []
 
431
  model_selection = []
432
- model_dict = model_data_picker(values=[False, False, False, False, True, True])
433
- for k in model_dict.keys():
434
- if model_dict[k] not in models:
435
- models += model_dict[k]
436
-
437
  default_models = model_fast_picker(models)
438
  if len(models)>0:
439
  if len(default_models)==0:
440
  default_models = [models[0]]
441
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
442
 
443
-
444
- if os.path.isfile(project_config.DASHBOARD_MODEL_RESULT_FILE) and len(model_selection)>0:
445
- data = project_utils.load_data(project_config.DASHBOARD_MODEL_RESULT_FILE)
446
  roundresult_chart(data, model_selection)
447
 
448
  # fig = px.line(df, x='roundNumber', y='corr', color='model', hover_data=['corr_pct'])
@@ -458,31 +475,22 @@ def histtrend():
458
 
459
 
460
  def model_evaluation():
461
- models = []
 
462
  model_selection = []
463
- model_dict = model_data_picker_bak(values=[True, True, True, True, True, True])
464
  mean_scale = [-0.05, 0.1]
465
  count_scale = [1, 50]
466
- sharpe_scale = [-0.2, 3]
467
  pct_scale = [0, 1]
468
  radar_scale = [0, 5]
469
 
470
- for k in model_dict.keys():
471
- if model_dict[k] not in models:
472
- models += model_dict[k]
473
-
474
- default_models = model_fast_picker(models)
475
  if len(models)>0:
476
  if len(default_models)==0:
477
- if 'integration_test' in models:
478
- default_models = 'integration_test'
479
- else:
480
- default_models = [models[0]]
481
-
482
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
483
 
484
- if os.path.isfile(project_config.DASHBOARD_MODEL_RESULT_FILE) and len(model_selection)>0:
485
- data = project_utils.load_data(project_config.DASHBOARD_MODEL_RESULT_FILE)
486
  round_data = data[data['model'].isin(model_selection)].drop_duplicates(['model', 'roundNumber'],keep='first').reset_index(drop=True)
487
  min_round = int(round_data['roundNumber'].min())
488
  max_round = int(round_data['roundNumber'].max())
@@ -493,18 +501,17 @@ def model_evaluation():
493
  min_selectround, max_selectround = st.slider('select plotting round range', min_round, max_round,
494
  (suggest_min_round, max_round), 1)
495
  round_list = [r for r in range(min_selectround, max_selectround+1)]
496
- defaultlist = ['corr_sharpe', 'mmc_sharpe', 'corr2mmc_sharpe','corr_mean', 'mmc_mean', 'corr2mmc_mean', 'count']
 
 
497
 
498
  select_metrics = st.multiselect('Metric Selection', list(model_eval_opt.keys()),
499
  format_func=lambda x: model_eval_opt[x], default=defaultlist)
500
 
501
- use_dailymetrics = ('id_corr_sharpe' in select_metrics) or (('id_mmc_sharpe' in select_metrics)) or ('id_corrmmc_sharpe' in select_metrics)
502
- if use_dailymetrics:
503
- st.write('use daily metrics')
504
 
505
  round_data = round_data[round_data['roundNumber'].isin(round_list)].reset_index(drop=True)
506
  #'need normalised radar chart + tabular view here
507
- roundmetric_df = get_roundmetric_data(round_data).sort_values(by='corrmmc_sharpe', ascending=False).reset_index(drop=True)
508
 
509
  radarmetric_df = roundmetric_df.copy(deep=True)
510
  for col in select_metrics:
@@ -574,7 +581,7 @@ def get_portfolio_overview(models, onlylatest=True):
574
  try:
575
  res_df = pd.concat(res_df, axis=0)
576
  res_df['profitability'] = res_df['realised_pl']/(res_df['current_stake']-res_df['realised_pl'])
577
- cols = ['model', 'date', 'current_stake', 'floating_stake', 'floating_pl', 'realised_pl', 'profitability']
578
 
579
  # res_df['date'] = res_df['date'].dt.date
580
  if onlylatest:
@@ -684,17 +691,23 @@ def check_session_state(key):
684
 
685
 
686
  def stake_overview():
687
- models = []
 
688
  model_selection = []
689
- model_dict = model_data_picker_bak(values=[True, True, True, True, True, True])
 
690
  for k in model_dict.keys():
691
- if model_dict[k] not in models:
692
- models += model_dict[k]
693
 
694
  default_models = model_fast_picker(models)
 
695
  if len(models)>0:
 
 
696
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
 
697
  redownload_data = False
 
698
  if len(model_selection) > 0:
699
  if 'stake_df' not in st.session_state:
700
  redownload_data = True
@@ -705,34 +718,34 @@ def stake_overview():
705
  ovdf = st.session_state['stake_df']
706
  if redownload_data:
707
  ovdf = get_portfolio_overview(model_selection, onlylatest=False)
708
- print(ovdf.shape)
709
  st.session_state['stake_df'] = ovdf
710
  st.session_state['stake_overview_models'] = set(ovdf['model'].unique().tolist())
711
 
712
  chartdf = ovdf.copy(deep=True)
713
  ovdf = ovdf.drop_duplicates('model', keep='first')
714
  ovdf = ovdf.sort_values(by='floating_pl', ascending=False).reset_index(drop=True)
715
- if len(ovdf)>0:
716
- numerai_date = str(ovdf['date'].values[0])[0:10]
 
717
  ovdf.drop(['date'], axis=1, inplace=True)
718
  stake_cts = st.columns(2)
719
  pl_cts = st.columns(2)
720
  date_label = st.empty()
721
  get_stake_graph(chartdf)
722
- ovdf_exp = st.expander('', expanded=True)
723
  with ovdf_exp:
724
- st.dataframe(ovdf, height=max_table_height)
725
  total_current_stake = round(ovdf['current_stake'].sum(), 3)
726
  total_floating_stake = round(ovdf['floating_stake'].sum(), 3)
727
  rpl = round(ovdf['realised_pl'].sum(), 3)
728
  fpl = round(ovdf['floating_pl'].sum(), 3)
729
  current_stake_str = f'### Stake Balance: {total_current_stake:0.3f} NMR'
730
- float_stake_str = f'### Floating Balance: {total_floating_stake:0.3f} NMR'
731
- if rpl>=0:
732
  real_pl_color = 'green'
733
  else:
734
  real_pl_color = 'red'
735
- if fpl>=0:
736
  float_pl_color = 'green'
737
  else:
738
  float_pl_color = 'red'
@@ -742,15 +755,36 @@ def stake_overview():
742
  stake_cts[1].markdown(float_stake_str, unsafe_allow_html=True)
743
  pl_cts[0].markdown(real_pl_str, unsafe_allow_html=True)
744
  pl_cts[1].markdown(float_pl_str, unsafe_allow_html=True)
745
- date_label.subheader(f'Date: {numerai_date}')
746
- if st.button('show breakdown by live rounds'):
747
- liveround_exp = st.expander('',expanded=True)
748
- with liveround_exp:
749
- stake_models = ovdf['model'].tolist()
750
- liveround_stake_df = get_stake_by_liverounds(stake_models)
751
- # st.write(liveround_stake_df)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
752
 
753
- round_view(liveround_stake_df,'live_round_stake')
754
 
755
 
756
 
@@ -766,12 +800,14 @@ def app_setting():
766
 
767
 
768
  def performance_overview():
 
769
  select_app = st.sidebar.selectbox("", list(pfm_opt.keys()), index=0, format_func=lambda x: pfm_opt[x])
770
  if select_app=='data_op':
771
  data_operation()
772
-
773
- if select_app=='performance_overview':
774
- performance_overview()
 
775
  if select_app=='historic_trend':
776
  histtrend()
777
  if select_app=='model_evaluation':
@@ -795,13 +831,14 @@ def show_content():
795
  app_opt = {
796
  'performance_overview' : 'Performance Overview',
797
  'stake_overview': 'Stake Overview',
798
- 'app_setting':''
799
  }
800
 
801
 
802
  pfm_opt = {
803
  'data_op': 'Download Score Data',
804
- 'liveround_view': 'Live Round Overview',
 
805
  'historic_trend': 'Historic Trend',
806
  'model_evaluation': 'Model Evaluation',
807
  }
@@ -839,23 +876,26 @@ id_metric_score_dic = {
839
 
840
 
841
  roundmetric_opt ={'corr':'Corr metrics',
842
- 'mmc' : 'MMC metrics',
 
 
 
843
  'corrmmc' : 'CorrMMC metrics',
844
- 'corr2mmc' : 'Corr2MMC metrics',
845
- 'pct' : 'Pecentage metrics'
846
-
847
  }
848
 
849
 
850
  histtrend_opt = {
851
  'corr':'Correlation',
852
  'mmc': 'MMC',
853
- 'corrmmc': 'Correlation+MMC',
854
- 'corr2mmc': 'Correlation+2*MMC',
855
  'corr_pct': 'Correlation Percentile',
 
856
  'mmc_pct':'MMC Percentile',
857
- 'cmavg_pct': 'Correlation+MMC Average Percentile',
858
- 'c2mavg_pct': 'Correlation+2*MMC Average Percentile',
 
 
859
 
860
  }
861
 
@@ -863,20 +903,19 @@ histtrend_opt = {
863
  model_eval_opt = {
864
  'corr_sharpe' : 'Correlation Sharpe',
865
  'mmc_sharpe' : 'MMC Sharpe',
 
 
866
  'corrmmc_sharpe' : 'Correlation+MMC Sharpe',
867
- 'corr2mmc_sharpe': 'Correlation+2*MMC Sharpe',
868
  'corr_mean':'Avg. Correlation',
869
- 'mmc_mean':'Avg. MMC',
870
  'count': 'Number of Rounds',
 
 
871
  'corrmmc_mean': 'Avg. Correlation+MMC',
872
- 'corr2mmc_mean': 'Avg. Correlation+2*MMC',
873
  'corr_pct_mean': 'Avg. Correlation Percentile',
874
  'mmc_pct_mean': 'Avg. MMC Percentile',
875
- 'cmavg_pct_mean': 'Avg. Correlation+MMC Percentile',
876
- 'c2mavg_pct_mean': 'Avg. Correlation+2*MMC Percentile',
877
- 'id_corr_sharpe': 'Daily Score corr sharpe',
878
- 'id_mmc_sharpe': 'Daily Score mmc sharpe',
879
- 'id_corrmmc_sharpe': 'Daily Score corrmmc sharpe',
880
  }
881
 
882
  stakeoverview_plot_opt = {
@@ -889,10 +928,16 @@ stakeoverview_plot_opt = {
889
  def show_session_status_info():
890
  # 'raw_performance_data'
891
  key1 = 'model_data'
 
892
  if check_session_state(key1) is None:
893
  st.write(f'{key1} is None')
894
  else:
895
  st.write(f'{key1} shape is {st.session_state[key1].shape}')
 
 
 
 
 
896
  pass
897
 
898
 
@@ -904,8 +949,8 @@ with height_exp:
904
  max_height = st.slider('Please choose the height for plots', 100, 1000, 400, 50)
905
  max_table_height = st.slider('Please choose the height for tables', 100, 1000, 500, 50)
906
 
907
- st.title('Numerai Dashboard')
908
 
 
909
  # key = 'pfm_default_model'
910
  # if check_session_state('pfm_default_model') is None:
911
  # st.write('set value')
@@ -916,6 +961,11 @@ st.title('Numerai Dashboard')
916
  # st.write(st.session_state)
917
 
918
  df = get_saved_data()
 
 
 
 
 
919
  show_session_status_info()
920
  # st.write(f'{key} is {chkval}')
921
 
 
98
  return picked_models
99
 
100
 
101
+ def model_fast_picker(model_list):
102
  text_content = '''
103
  fast model picker by CSV string.
104
  example: "model1, model2, model3"
 
109
  csv_parts = text.split(',')
110
  for s in csv_parts:
111
  m = s.strip()
112
+ if (m in model_list): #and (m not in preselected_models):
113
  result_models.append(m)
114
  return list(dict.fromkeys(result_models))
115
 
 
127
  select_round = row_cts[c].slider('select a round', earliest_round, latest_round, suggest_round, 1)
128
  # row_cts[c].write(select_round)
129
  round_data = data[data['roundNumber']==select_round].sort_values(by=sortcol, ascending=False).reset_index(drop=True)
130
+ round_resolved_time = round_data['roundResolveTime'][0]
131
  # round_data = round_data[round_data['model'].isin(models)].reset_index(drop=True)
132
+ # latest_date = round_data['date'].values[0]
133
+ row_cts[c].write(f'round: {select_round} resolved time: {round_resolved_time}')
134
+ row_cts[c].dataframe(round_data.drop(['roundNumber', 'roundResolveTime'], axis=1), height=max_table_height-100)
135
 
136
 
137
 
 
149
  pass
150
 
151
  def get_roundmetric_data(data):
152
+ numfeats1 = ['corr', 'mmc', 'tc', 'corrmmc', 'corrtc', 'fncV3', 'fncV3_pct']
153
  stat1 = ['sum', 'mean', 'count',
154
  {'sharpe': project_utils.get_array_sharpe}] # {'ptp':np.ptp}]#{'sharp':project_utils.get_array_sharpe}]
155
+ numfeats2 = ['corr_pct', 'mmc_pct', 'tc_pct','corrtc_avg_pct', 'corrmmc_avg_pct']
156
  stat2 = ['mean']#, {'sharp': project_utils.get_array_sharpe}]
157
 
158
  roundmetric_agg_rcp = [
 
176
  # st.write(data.columns.tolist())
177
  for col in data.columns.tolist():
178
  if select_metric =='corrmmc':
179
+ if (f'{select_metric}_' in col) or ('corrmmc_avg_' in col):
180
  cols += [col]
181
+ elif select_metric =='corrtc':
182
+ if (f'{select_metric}_' in col) or ('corrtc_avg_' in col):
183
  cols += [col]
184
  else:
185
+ # if (f'{select_metric}_' in col) and (not('corrmmc' in col)) and (not('corrtc' in col)):
186
+ if (f'{select_metric}_' in col):
187
  cols+= [col]
188
 
189
  if select_metric != 'pct':
190
  sort_col = select_metric+'_sharpe'
191
  else:
192
+ sort_col = 'corr_pct_mean'
193
  view_data = data[cols].sort_values(by=sort_col, ascending=False)
194
  row_cts[c].dataframe(view_data)
195
  pass
 
239
  generate_round_table(data, row_cts, c, r)
240
  if select_perview=='dailyscore_metric':
241
  generate_dailyscore_metrics(data, row_cts, c, r)
242
+ if select_perview=='metric_view':
243
  generate_round_metrics(data, row_cts, c, r)
244
  if select_perview=='dailyscore_chart':
245
  dailyscore_chart(data, row_cts, c, r, select_metric)
 
248
 
249
 
250
  def score_overview():
251
+ if 'model_data' in st.session_state:
252
+ data = st.session_state['model_data'].copy()
253
+ data = data.drop_duplicates(['model', 'roundNumber'], keep='first')
254
+ roundview = st.expander('round performance overview', expanded=True)
255
+ with roundview:
256
+ round_view(data, 'round_result')
257
+ else:
258
+ st.write('model data missing, please go to the Dowanload Score Data section to download model data first')
259
+
260
+ def metric_overview():
261
+ if 'model_data' in st.session_state:
262
+ data = st.session_state['model_data'].copy()
263
+ st.subheader('Select Round Data')
264
+ latest_round = int(data['roundNumber'].max())
265
+ earliest_round = int(data['roundNumber'].min())
266
+ if (latest_round - earliest_round) > 10:
267
+ # suggest_round = int(latest_round - (latest_round - earliest_round) / 2)
268
+ suggest_round = 280
269
+ else:
270
+ suggest_round = earliest_round
271
+ select_rounds = st.slider('select a round', earliest_round, latest_round, (suggest_round, latest_round - 1), 1)
272
+ data=data.drop_duplicates(['model', 'roundNumber'], keep='first')
273
+ data = data[(data['roundNumber'] >= select_rounds[0]) & (data['roundNumber'] <= select_rounds[1])].reset_index(drop=True)
274
+ roundmetrics_data = get_roundmetric_data(data)
275
+ min_count = int(roundmetrics_data['count'].min())
276
+ max_count = int(roundmetrics_data['count'].max())
277
+ if min_count < max_count:
278
+ select_minround = st.sidebar.slider('miminum number of rounds', min_count, max_count, min_count, 1)
279
+ else:
280
+ select_minround = min_count
281
+ roundmetrics_data = roundmetrics_data[roundmetrics_data['count'] >= select_minround].reset_index(drop=True)
282
+ metricview_exp = st.expander('metric overview', expanded=True)
283
+ dataview_exp = st.expander('full data view', expanded=False)
284
+ with metricview_exp:
285
+ round_view(roundmetrics_data, 'metric_view')
286
+ with dataview_exp:
287
+ st.write(roundmetrics_data)
288
+ else:
289
+ st.write('model data missing, please go to the Dowanload Score Data section to download model data first')
290
 
291
 
292
  def data_operation():
293
  # top_lb, top_tp3m, top_tp1y, special_list = sidebar_data_picker()
294
+ full_model_list = st.session_state['models']
295
  latest_round = project_utils.latest_round
296
  models = []
297
  benchmark_opt = st.sidebar.checkbox('download default models', value=True)
 
299
  model_dict = default_model_picker()
300
  for k in model_dict.keys():
301
  models += model_dict[k]
302
+ models = models + model_fast_picker(full_model_list)
303
  if len(models)>0:
304
+ model_selection = st.multiselect('select models', st.session_state['models'], default=models)
305
  suggest_min_round = 182 #latest_round-50
306
  min_round, max_round = st.slider('select tournament rounds', 200, latest_round, (suggest_min_round, latest_round), 1)
307
  roundlist = [i for i in range(max_round, min_round-1, -1)]
 
323
  if prjreload:
324
  project_utils.reload_project()
325
  if len(model_df)>0:
326
+ rename_dict = {'corrPercentile': 'corr_pct', 'correlation':'corr', 'corrWMetamodel':'corr_meta', 'mmcPercentile':'mmc_pct', 'tcPercentile':'tc_pct', 'fncV3Percentile':'fncV3_pct'}
327
  model_df.rename(columns=rename_dict, inplace=True)
328
  model_df['corrmmc'] = model_df['corr'] + model_df['mmc']
329
+ model_df['corrmmc_avg_pct'] = (model_df['corr_pct'] + model_df['mmc_pct'])/2
330
+ model_df['corrtc'] = model_df['corr'] + model_df['tc']
331
+ model_df['corrtc_avg_pct'] = (model_df['corr_pct'] + model_df['tc_pct'])/2
332
+ # st.write(model_df.head(5))
333
+ # ord_cols = ['model','corr', 'mmc', 'tc', 'corrmmc', 'corrtc', 'corr_pct', 'tc_pct', 'corrtc_avg_pct','corr_meta', 'mmc_pct', 'corrmmc_avg_pct', 'roundNumber', 'roundResolveTime']
334
+ ord_cols = ['model','corr', 'tc', 'corrtc', 'corr_pct', 'tc_pct', 'corrtc_avg_pct','corr_meta', 'fncV3', 'fncV3_pct','corrmmc_avg_pct', 'roundNumber', 'roundResolveTime', 'mmc', 'corrmmc','mmc_pct']
335
+
336
  model_df = model_df[ord_cols]
337
  if project_config.SAVE_LOCAL_COPY:
338
  project_utils.pickle_data(project_config.MODEL_ROUND_RESULT_FILE, model_df)
 
342
  st.text('list of models being tracked')
343
  st.write(model_dict)
344
  try:
345
+ dshape = st.session_state['model_data'].shape
346
+ st.write(f'downloaded model result data shape is {dshape}')
347
+ st.write(model_df)
348
  except:
349
  st.write('model data was not retrieved')
350
 
 
403
  time.sleep(0.1)
404
  my_bar.progress(percent_complete)
405
  model_df = pd.concat(model_dfs, axis=0).sort_values(by=['roundNumber'], ascending=False).reset_index(drop=True)
406
+ model_df['roundResolveTime'] = pd.to_datetime(model_df['roundResolveTime'])
407
+ model_df['roundResolveTime'] = model_df['roundResolveTime'].dt.strftime(project_config.DATETIME_FORMAT3)
408
  model_df = model_df[model_df['roundNumber'].isin(roundlist)].reset_index(drop=True)
409
  return model_df
410
 
 
428
  min_selectround, max_selectround = st.slider('select plotting round range', min_round, max_round,
429
  (suggest_min_round, max_round), 1)
430
 
 
 
 
431
  select_metric = st.selectbox('Choose a metric', list(histtrend_opt.keys()), index=0,
432
  format_func=lambda x: histtrend_opt[x])
433
  round_range = [min_selectround, max_selectround]
 
438
  mean_df['mean'] = mean_df[select_metric]
439
  merge_cols = ['model', 'model avg.', 'mean']
440
  round_data = round_data.merge(right=mean_df[merge_cols], on='model', how='left').sort_values(by=['mean','model', 'roundNumber'], ascending=False)
441
+ fig = chart_pxline(round_data, 'roundNumber', y=select_metric, color='model avg.', hover_data=list(histtrend_opt.keys())+['roundResolveTime'],x_range=round_range)
 
442
  if fig is not None:
443
  st.plotly_chart(fig, use_container_width=True)
 
 
 
 
444
 
445
+
 
446
 
447
 
448
 
 
450
  def histtrend():
451
  # default_models = ['yxbot']
452
  # models = default_models.copy()
453
+ data = st.session_state['model_data'].copy()
454
+ models = data['model'].unique().tolist()
455
  model_selection = []
 
 
 
 
 
456
  default_models = model_fast_picker(models)
457
  if len(models)>0:
458
  if len(default_models)==0:
459
  default_models = [models[0]]
460
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
461
 
462
+ if len(model_selection)>0:
 
 
463
  roundresult_chart(data, model_selection)
464
 
465
  # fig = px.line(df, x='roundNumber', y='corr', color='model', hover_data=['corr_pct'])
 
475
 
476
 
477
  def model_evaluation():
478
+ data = st.session_state['model_data'].copy()
479
+ models = data['model'].unique().tolist()
480
  model_selection = []
481
+ default_models = model_fast_picker(models)
482
  mean_scale = [-0.05, 0.1]
483
  count_scale = [1, 50]
484
+ sharpe_scale = [-0.2, 2]
485
  pct_scale = [0, 1]
486
  radar_scale = [0, 5]
487
 
 
 
 
 
 
488
  if len(models)>0:
489
  if len(default_models)==0:
490
+ default_models = [models[0]]
 
 
 
 
491
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
492
 
493
+ if len(model_selection)>0:
 
494
  round_data = data[data['model'].isin(model_selection)].drop_duplicates(['model', 'roundNumber'],keep='first').reset_index(drop=True)
495
  min_round = int(round_data['roundNumber'].min())
496
  max_round = int(round_data['roundNumber'].max())
 
501
  min_selectround, max_selectround = st.slider('select plotting round range', min_round, max_round,
502
  (suggest_min_round, max_round), 1)
503
  round_list = [r for r in range(min_selectround, max_selectround+1)]
504
+ # defaultlist = ['corr_sharpe', 'tc_sharpe', 'corrtc_sharpe','corr_mean', 'tc_mean' 'corrtc_mean', 'corrtc_avg_pct','count']
505
+
506
+ defaultlist = ['corr_sharpe', 'tc_sharpe', 'corrtc_sharpe', 'corr_mean', 'tc_mean', 'corrtc_mean', 'corrtc_avg_pct_mean']
507
 
508
  select_metrics = st.multiselect('Metric Selection', list(model_eval_opt.keys()),
509
  format_func=lambda x: model_eval_opt[x], default=defaultlist)
510
 
 
 
 
511
 
512
  round_data = round_data[round_data['roundNumber'].isin(round_list)].reset_index(drop=True)
513
  #'need normalised radar chart + tabular view here
514
+ roundmetric_df = get_roundmetric_data(round_data).sort_values(by='corrtc_sharpe', ascending=False).reset_index(drop=True)
515
 
516
  radarmetric_df = roundmetric_df.copy(deep=True)
517
  for col in select_metrics:
 
581
  try:
582
  res_df = pd.concat(res_df, axis=0)
583
  res_df['profitability'] = res_df['realised_pl']/(res_df['current_stake']-res_df['realised_pl'])
584
+ cols = ['model', 'date', 'current_stake', 'floating_stake', 'floating_pl', 'realised_pl', 'profitability', 'roundNumber', 'roundResolved', 'payout']
585
 
586
  # res_df['date'] = res_df['date'].dt.date
587
  if onlylatest:
 
691
 
692
 
693
  def stake_overview():
694
+ # data = st.session_state['models'].copy()
695
+ models = st.session_state['models'].copy()
696
  model_selection = []
697
+ baseline_models = []
698
+ model_dict = default_model_picker()
699
  for k in model_dict.keys():
700
+ baseline_models += model_dict[k]
 
701
 
702
  default_models = model_fast_picker(models)
703
+
704
  if len(models)>0:
705
+ # if len(default_models)==0:
706
+ # default_models = baseline_models[0]
707
  model_selection = st.sidebar.multiselect('select models for chart', models, default=default_models)
708
+
709
  redownload_data = False
710
+ # download = st.sidebar.button('download stake data')
711
  if len(model_selection) > 0:
712
  if 'stake_df' not in st.session_state:
713
  redownload_data = True
 
718
  ovdf = st.session_state['stake_df']
719
  if redownload_data:
720
  ovdf = get_portfolio_overview(model_selection, onlylatest=False)
 
721
  st.session_state['stake_df'] = ovdf
722
  st.session_state['stake_overview_models'] = set(ovdf['model'].unique().tolist())
723
 
724
  chartdf = ovdf.copy(deep=True)
725
  ovdf = ovdf.drop_duplicates('model', keep='first')
726
  ovdf = ovdf.sort_values(by='floating_pl', ascending=False).reset_index(drop=True)
727
+ if len(ovdf) > 0:
728
+ overview_cols = ['model', 'current_stake', 'floating_stake', 'floating_pl', 'realised_pl']
729
+ date_text = datetime.datetime.now().strftime(project_config.DATETIME_FORMAT3)
730
  ovdf.drop(['date'], axis=1, inplace=True)
731
  stake_cts = st.columns(2)
732
  pl_cts = st.columns(2)
733
  date_label = st.empty()
734
  get_stake_graph(chartdf)
735
+ ovdf_exp = st.expander('stake data overview', expanded=True)
736
  with ovdf_exp:
737
+ st.dataframe(ovdf[overview_cols], height=max_table_height)
738
  total_current_stake = round(ovdf['current_stake'].sum(), 3)
739
  total_floating_stake = round(ovdf['floating_stake'].sum(), 3)
740
  rpl = round(ovdf['realised_pl'].sum(), 3)
741
  fpl = round(ovdf['floating_pl'].sum(), 3)
742
  current_stake_str = f'### Stake Balance: {total_current_stake:0.3f} NMR'
743
+ float_stake_str = f'### Floating Balance: {total_floating_stake:0.3f} NMR'
744
+ if rpl >= 0:
745
  real_pl_color = 'green'
746
  else:
747
  real_pl_color = 'red'
748
+ if fpl >= 0:
749
  float_pl_color = 'green'
750
  else:
751
  float_pl_color = 'red'
 
755
  stake_cts[1].markdown(float_stake_str, unsafe_allow_html=True)
756
  pl_cts[0].markdown(real_pl_str, unsafe_allow_html=True)
757
  pl_cts[1].markdown(float_pl_str, unsafe_allow_html=True)
758
+ date_label.subheader(f'Date: {date_text}')
759
+ if st.sidebar.checkbox('show breakdown by live rounds', value=False):
760
+ liveround_exp = st.expander('show breakdown by live rounds (requires extra data downloading)',expanded=True)
761
+ with liveround_exp:
762
+ stake_models = ovdf['model'].tolist()
763
+ liveround_stake_df = get_stake_by_liverounds(stake_models)
764
+ round_view(liveround_stake_df,'live_round_stake')
765
+ if st.sidebar.checkbox('show resolved round summary', value=False):
766
+ resolvedround_exp = st.expander('show resolved rounds summary for selected model group', expanded=True)
767
+ with resolvedround_exp:
768
+ get_roundresolve_history(chartdf)
769
+ # st.write(chartdf)
770
+
771
+
772
def get_roundresolve_history(data):
    """Render a per-round summary table (total stake, round P/L, resolve date)
    for all resolved rounds of the currently selected model group.

    Parameters
    ----------
    data : pandas.DataFrame
        Stake history rows; must contain the columns 'roundResolved',
        'roundNumber', 'date', 'current_stake' and 'payout'.

    Side effects
    ------------
    Writes the summary DataFrame to the Streamlit page via ``st.write``.
    """
    # Keep only rows belonging to rounds that have already resolved.
    # (Fix: dropped an unused `resolved_rounds` list that was computed here
    # but never read.)
    rsdf = data[data['roundResolved'] == True].reset_index(drop=True)
    # One resolve date per round — first occurrence wins.
    rs_date = rsdf[['date', 'roundNumber']].drop_duplicates('roundNumber').reset_index(drop=True)
    numfeats = ['current_stake', 'payout']
    stat1 = ['sum']
    # Aggregation recipe consumed by the project's groupby helper:
    # group by roundNumber, sum stake and payout.
    agg_rcp = [[['roundNumber'], numfeats, stat1]]
    res = project_utils.groupby_agg_execution(agg_rcp, rsdf)['roundNumber'].sort_values(by='roundNumber',
                                                                                        ascending=False)
    # Attach the resolve date for each round.
    res = res.merge(right=rs_date, on='roundNumber')

    # Human-friendly column names for display.
    rename_dict = {'roundNumber': 'Round', 'roundNumber_current_stake_sum': 'Total Stake',
                   'roundNumber_payout_sum': 'Round P/L', 'date': 'Resolved Date'}
    res.rename(columns=rename_dict, inplace=True)
    st.write(res)
787
 
 
788
 
789
 
790
 
 
800
 
801
 
802
  def performance_overview():
803
+ # st.sidebar.subheader('Choose a Table View')
804
  select_app = st.sidebar.selectbox("", list(pfm_opt.keys()), index=0, format_func=lambda x: pfm_opt[x])
805
  if select_app=='data_op':
806
  data_operation()
807
+ if select_app=='liveround_view':
808
+ score_overview()
809
+ if select_app=='metric_view':
810
+ metric_overview()
811
  if select_app=='historic_trend':
812
  histtrend()
813
  if select_app=='model_evaluation':
 
831
# Top-level app navigation: internal view key -> label shown in the sidebar.
app_opt = {
    'performance_overview': 'Performance Overview',
    'stake_overview': 'Stake Overview',
    # 'app_setting':''  # settings page currently disabled
}
836
 
837
 
838
# Performance-overview sub-views: internal key -> sidebar label.
# Order here is the order shown in the Streamlit selectbox.
pfm_opt = {
    'data_op': 'Download Score Data',
    'liveround_view': 'Round Overview',
    'metric_view': 'Metric Overview',
    'historic_trend': 'Historic Trend',
    'model_evaluation': 'Model Evaluation',
}
 
876
 
877
 
878
# Round-metric view options: metric family key -> display label.
# Fix: corrected user-facing typo 'Pecentage metrics' -> 'Percentage metrics'.
roundmetric_opt = {
    'corr': 'Corr metrics',
    'tc': 'TC metrics',
    'corrtc': 'CorrTC metrics',
    'fncV3': 'FNCV3 metrics',
    'pct': 'Percentage metrics',
    'corrmmc': 'CorrMMC metrics',
    'mmc': 'MMC metrics'
}
886
 
887
 
888
# Historic-trend view: score column key -> display label.
histtrend_opt = {
    # raw scores
    'corr': 'Correlation',
    'mmc': 'MMC',
    'tc': 'TC',
    # percentile ranks
    'corr_pct': 'Correlation Percentile',
    'tc_pct': 'TC Percentile',
    'mmc_pct': 'MMC Percentile',
    # combined scores
    'corrmmc': 'Correlation+MMC',
    'corrtc': 'Correlation+TC',
    'corrtc_avg_pct': 'Correlation+TC Average Percentile',
    'corrmmc_avg_pct': 'Correlation+MMC Average Percentile',
}
901
 
 
903
# Model-evaluation view: aggregated metric key -> display label.
model_eval_opt = {
    # sharpe ratios per metric family
    'corr_sharpe': 'Correlation Sharpe',
    'mmc_sharpe': 'MMC Sharpe',
    'tc_sharpe': 'TC Sharpe',
    'corrtc_sharpe': 'Correlation+TC Sharpe',
    'corrmmc_sharpe': 'Correlation+MMC Sharpe',
    # mean scores
    'corr_mean': 'Avg. Correlation',
    'tc_mean': 'Avg. TC',
    'count': 'Number of Rounds',
    'mmc_mean': 'Avg. MMC',
    'corrtc_mean': 'Avg. Correlation+TC',
    'corrmmc_mean': 'Avg. Correlation+MMC',
    # mean percentile ranks
    'corr_pct_mean': 'Avg. Correlation Percentile',
    'mmc_pct_mean': 'Avg. MMC Percentile',
    'corrmmc_avg_pct_mean': 'Avg. Correlation+MMC Percentile',
    'corrtc_avg_pct_mean': 'Avg. Correlation+TC Percentile',
}
920
 
921
  stakeoverview_plot_opt = {
 
928
def show_session_status_info():
    """Write a short status line to the page for each cached session entry.

    Reports on two ``st.session_state`` keys:
    - 'model_data': the cached score DataFrame (its shape is shown);
    - 'models': the cached leaderboard model-name list (its length is shown).

    Side effects: writes via ``st.write`` only.
    (Fix: removed a redundant trailing ``pass`` statement.)
    """
    # 'raw_performance_data'
    key1 = 'model_data'
    key2 = 'models'
    if check_session_state(key1) is None:
        st.write(f'{key1} is None')
    else:
        # model_data is a DataFrame — report its shape
        st.write(f'{key1} shape is {st.session_state[key1].shape}')

    if check_session_state(key2) is None:
        st.write(f'{key2} is None')
    else:
        # models is a plain list of model names — report its length
        st.write(f'{key2} list has {len(st.session_state[key2])} models')
942
 
943
 
 
949
  max_height = st.slider('Please choose the height for plots', 100, 1000, 400, 50)
950
  max_table_height = st.slider('Please choose the height for tables', 100, 1000, 500, 50)
951
 
 
952
 
953
+ st.title('Numerai Dashboard')
954
  # key = 'pfm_default_model'
955
  # if check_session_state('pfm_default_model') is None:
956
  # st.write('set value')
 
961
  # st.write(st.session_state)
962
 
963
  df = get_saved_data()
964
+
965
+ if check_session_state('models') is None:
966
+ with st.spinner('updating model list'):
967
+ st.session_state['models'] = numerapi_utils.get_lb_models()
968
+
969
  show_session_status_info()
970
  # st.write(f'{key} is {chkval}')
971
 
project_tools/numerapi_utils.py CHANGED
@@ -11,43 +11,44 @@ napi = numerapi.NumerAPI()
11
  # def get_round
12
 
13
 
14
- def get_model_history(model):
15
- res = napi.daily_user_performances(model)
16
- res = pd.DataFrame.from_dict(res)
17
- res['payoutPending'] = res['payoutPending'].astype(np.float64)
18
- res['payoutSettled'] = res['payoutSettled'].astype(np.float64)
19
- res['stakeValue'] = res['stakeValue'].astype(np.float64)
20
- res['deltaRatio'] = res['payoutPending'] / res['stakeValue']
21
- res['realised_pl'] = project_utils.series_reverse_cumsum(res['payoutSettled'])
22
- res['floating_pl'] = project_utils.series_reverse_cumsum(res['payoutPending']) - res['realised_pl']
23
- res['current_stake'] = res['stakeValue'] - res['floating_pl']
24
- rename_dict = {'stakeValue':'floating_stake'}
25
- res = res.rename(columns=rename_dict)
26
- # res['equity'] = res['stakeValue'] + res['floating_pl']
27
- # cols = res.columns.tolist()
28
- # res = res[['model'] + cols]
29
-
30
- res['model'] = model
31
- cols = ['model', 'date', 'current_stake', 'floating_stake', 'payoutPending', 'floating_pl', 'realised_pl']
32
- res = res[cols]
33
- return res
 
34
 
35
 
36
  def get_portfolio_overview(models, onlylatest=True):
37
  res_df = []
38
  for m in models:
39
- try:
40
- print(f'extracting information for model {m}')
41
- if onlylatest:
42
- mdf = get_model_history(m).loc[0:0]
43
- else:
44
- mdf = get_model_history(m)
45
- res_df.append(mdf)
46
- except:
47
- print(f'no information for model {m} is available')
48
  if len(res_df)>0:
49
  res_df = pd.concat(res_df, axis=0)
50
- res_df['date'] = res_df['date'].dt.date
51
  if onlylatest:
52
  return res_df.sort_values(by='floating_pl', ascending=False).reset_index(drop=True)
53
  else:
@@ -189,6 +190,9 @@ def daily_submissions_performances_V3(modelname: str) -> List[Dict]:
189
  mmcPercentile
190
  tc
191
  tcPercentile
 
 
 
192
  corrWMetamodel
193
  payout
194
  roundResolved
@@ -214,8 +218,18 @@ def daily_submissions_performances_V3(modelname: str) -> List[Dict]:
214
  return performances
215
 
216
 
217
-
218
-
 
 
 
 
 
 
 
 
 
 
219
 
220
 
221
 
@@ -377,7 +391,7 @@ def get_model_history_v3(model):
377
  res['date'] = pd.to_datetime(res['roundResolveTime']).dt.date
378
 
379
  res['realised_pl'] = res['payout_cumsum']
380
- latest_realised_pl = res[res['roundResolved'] == True]['payout_cumsum'].max()
381
  res.loc[res['roundResolved'] == False, 'realised_pl'] = latest_realised_pl
382
 
383
  res['floating_pl'] = 0
 
11
  # def get_round
12
 
13
 
14
+ # deprecated — superseded by get_model_history_v3
15
+ # def get_model_history(model):
16
+ # res = napi.daily_user_performances(model)
17
+ # res = pd.DataFrame.from_dict(res)
18
+ # res['payoutPending'] = res['payoutPending'].astype(np.float64)
19
+ # res['payoutSettled'] = res['payoutSettled'].astype(np.float64)
20
+ # res['stakeValue'] = res['stakeValue'].astype(np.float64)
21
+ # res['deltaRatio'] = res['payoutPending'] / res['stakeValue']
22
+ # res['realised_pl'] = project_utils.series_reverse_cumsum(res['payoutSettled'])
23
+ # res['floating_pl'] = project_utils.series_reverse_cumsum(res['payoutPending']) - res['realised_pl']
24
+ # res['current_stake'] = res['stakeValue'] - res['floating_pl']
25
+ # rename_dict = {'stakeValue':'floating_stake'}
26
+ # res = res.rename(columns=rename_dict)
27
+ # # res['equity'] = res['stakeValue'] + res['floating_pl']
28
+ # # cols = res.columns.tolist()
29
+ # # res = res[['model'] + cols]
30
+ #
31
+ # res['model'] = model
32
+ # cols = ['model', 'date', 'current_stake', 'floating_stake', 'payoutPending', 'floating_pl', 'realised_pl']
33
+ # res = res[cols]
34
+ # return res
35
 
36
 
37
  def get_portfolio_overview(models, onlylatest=True):
38
  res_df = []
39
  for m in models:
40
+ # try:
41
+ print(f'extracting information for model {m}')
42
+ if onlylatest:
43
+ mdf = get_model_history_v3(m).loc[0:0]
44
+ else:
45
+ mdf = get_model_history_v3(m)
46
+ res_df.append(mdf)
47
+ # except:
48
+ # print(f'no information for model {m} is available')
49
  if len(res_df)>0:
50
  res_df = pd.concat(res_df, axis=0)
51
+ # res_df['date'] = res_df['date'].dt.date
52
  if onlylatest:
53
  return res_df.sort_values(by='floating_pl', ascending=False).reset_index(drop=True)
54
  else:
 
190
  mmcPercentile
191
  tc
192
  tcPercentile
193
+ tcMultiplier
194
+ fncV3
195
+ fncV3Percentile
196
  corrWMetamodel
197
  payout
198
  roundResolved
 
218
  return performances
219
 
220
 
221
def get_lb_models(limit=20000, offset=0):
    """Return the usernames of models on the Numerai v2 leaderboard.

    :param limit: maximum number of leaderboard entries to request
    :param offset: number of entries to skip from the top of the board
    :return: list of model username strings
    """
    query = """
            query($limit: Int, $offset: Int){
              v2Leaderboard(limit:$limit, offset:$offset){
                username
              }
            }
            """
    arguments = {'limit': limit, 'offset': offset}
    # raw_query returns the GraphQL payload; drill down to the entry list.
    rows = napi.raw_query(query, arguments)['data']['v2Leaderboard']
    return [row['username'] for row in rows]
233
 
234
 
235
 
 
391
  res['date'] = pd.to_datetime(res['roundResolveTime']).dt.date
392
 
393
  res['realised_pl'] = res['payout_cumsum']
394
+ latest_realised_pl = res[res['roundResolved'] == True]['payout_cumsum'].values[0]
395
  res.loc[res['roundResolved'] == False, 'realised_pl'] = latest_realised_pl
396
 
397
  res['floating_pl'] = 0