kernel_id — int64 (24.2k to 23.3M)
prompt — string, lengths 8 to 1.85M
completion — string, lengths 1 to 182k
comp_name — string, lengths 5 to 57
8,192,805
WINDOW_X_SIZE = 60 WINDOW_Y_SIZE = 1 PUBLIC_LEADERBOARD = False DRIVE = False KAGGLE = True FILE_NAME = 'submission.csv' if DRIVE: path = '/content/drive/My Drive/PROYECTOS/COVID-19/' elif KAGGLE: path = '../input/covid19-global-forecasting-week-2/' else: path = './' if KAGGLE: path_input = path path_output = '../working/' path_population = '../input/locations-population/' else: path_input = path + 'input/' path_output = path + 'output/' path_population = path_input <load_from_csv>
train_win = train.copy() train_los = train.copy() train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W', 'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']] train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2'] test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L', 'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']] test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2', 'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
df_data = pd.read_csv(path_input + 'train.csv', sep=',') df_test = pd.read_csv(path_input + 'test.csv', sep=',') df_sample_sub = pd.read_csv(path_input + 'submission.csv', sep=',') df_population = pd.read_csv(path_population + 'locations_population.csv', sep=',') <define_variables>
def feature_engineering(df): df['Seed_diff'] = df['Seed_1'] - df['Seed_2'] df['Score_diff'] = df['Score_1'] - df['Score_2'] df['Count_diff'] = df['Count_1'] - df['Count_2'] df['Var_diff'] = df['Var_1'] - df['Var_2'] df['Mean_score1'] = df['Score_1'] / df['Count_1'] df['Mean_score2'] = df['Score_2'] / df['Count_2'] df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2'] df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1'] df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2'] return df train_win = feature_engineering(train_win) train_los = feature_engineering(train_los) test = feature_engineering(test )
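The FanoFactor features above are a variance-to-mean ratio of each team's scoring; a minimal numeric sketch with made-up values (not from the tournament data):

import numpy as np

scores = np.array([70.0, 75.0, 80.0, 65.0])   # hypothetical per-game scores for one team
mean_score = scores.mean()
fano = scores.var() / mean_score               # variance-to-mean ratio, as in FanoFactor_1/2
print(mean_score, fano)                        # 72.5 and ~0.43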
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
date_format = "%Y-%m-%d" print(f'Train Dates: {df_data.Date.min() } - {df_data.Date.max() } // QT_DAYS: {(datetime.strptime(df_data.Date.max() , date_format)- datetime.strptime(df_data.Date.min() , date_format)).days} ') print(f'Test Dates: {df_test.Date.min() } - {df_test.Date.max() } // QT_DAYS: {(datetime.strptime(df_test.Date.max() , date_format)- datetime.strptime(df_test.Date.min() , date_format)).days} ') print(f'Window days to be predicted: {(datetime.strptime(df_test.Date.max() , date_format)- datetime.strptime(df_data.Date.max() , date_format)).days}') window_test_days =(datetime.strptime(df_test.Date.max() , date_format)- datetime.strptime(df_data.Date.max() , date_format)).days <feature_engineering>
data = pd.concat(( train_win, train_los)).reset_index(drop=True) print(data.shape) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def parseProvinces(df_data, df_test, df_population): df_data['Province_State'] = df_data['Province_State'].fillna(' df_test['Province_State'] = df_test['Province_State'].fillna(' df_population['Province.State'] = df_population['Province.State'].fillna(' return df_data, df_test, df_population def isNaN(num): return num != num def getDaysElapsedSinceDeltaCases(df, feature_names=['qt_day_100cases'], deltas=[100]): for feature_name in feature_names: df[feature_name] = 0 for i, row in tqdm(df.iterrows()): date = row['Date'] country = row['Country_Region'] province = row['Province_State'] for pos, feature_name in enumerate(feature_names): date_delta = df_data['Date'][(df_data['Country_Region']==country)&(df_data['Province_State']==province)&(df_data['Date']<=date)&(df_data['ConfirmedCases']>=deltas[pos])].min() if isNaN(date_delta): value = 0 else: value =(datetime.strptime(date, date_format)- datetime.strptime(date_delta, date_format)).days + 1 if value < 0: value = 0 df[feature_name][i] = value return df def normalizeDataModel(vector, feature, axis, dict_normalization, mode='train'): uni_data_mean = vector.mean(axis=axis) uni_data_std = vector.std(axis=axis) if mode=='train': dict_normalization[feature] = {'Mean': uni_data_mean, 'Std': uni_data_std} return dict_normalization,(vector - uni_data_mean)/uni_data_std def createCountryDicts(df_data): unique_countries = df_data['Country_Region'].unique() dict_countries = {} dict_countries_inv = {} for i, country in enumerate(unique_countries): dict_countries[country] = i dict_countries_inv[i] = country return dict_countries, dict_countries_inv def createProvinceDicts(df_data): unique_provinces = df_data['Province_State'].unique() dict_provinces = {} dict_provinces_inv = {} for i, province in enumerate(unique_provinces): dict_provinces[province] = i dict_provinces_inv[i] = province return dict_provinces, dict_provinces_inv def getCountryRepresentation(df, scale=True): dict_latitudes = {'France': [46.887, 2.552]} vector = np.zeros(( len(dict_countries.keys()), 5)) for i, country in enumerate(dict_countries): population = df['Population'][df['Country_Region']==country].values[0] if df['co_province'][df['Country_Region']==country].unique().shape[0] == 1: last_confirmed, last_fatalities = df[['ConfirmedCases', 'Fatalities']][df['Country_Region']==country].values[-1] std_last_confirmed, std_last_fatalities = df[['ConfirmedCases', 'Fatalities']][df['Country_Region']==country].values[-5:].std(axis=0) else: last_confirmed, last_fatalities = df[['Date', 'ConfirmedCases', 'Fatalities']][df['Country_Region']==country].groupby(['Date'] ).sum().values[-1] std_last_confirmed, std_last_fatalities = df[['Date', 'ConfirmedCases', 'Fatalities']][df['Country_Region']==country].groupby(['Date'] ).sum().values[-5:].std(axis=0) vector[i] = np.array([population, std_last_confirmed, std_last_fatalities, last_confirmed, last_fatalities]) if scale: scaler = StandardScaler() vector = scaler.fit_transform(vector) return vector def getProvinceRepresentation(df, scale=True): vector = np.zeros(( len(dict_provinces.keys()), 13)) countries_raw = getCountryRepresentation(df, scale=False) for i, province in enumerate(dict_provinces): if province == ' lat_, long_ = 0, 0 last_confirmed, last_fatalities = countries_raw[:, -2:].mean(axis=0) std_last_confirmed, std_last_fatalities = countries_raw[:, -2:].mean(axis=0) qt_1, qt_100, qt_1_000 = df_data[['qt_days_since_1_case', 'qt_days_since_100_cases', 'qt_days_since_1000_cases']]\ 
[(df_data['Date']==df_data.Date.max())&(df_data['Province_State']==' population = df_data['Population'][(df_data['Date']==df_data.Date.max())&(df_data['Province_State']==' vec_country = countries_raw.mean(axis=0) else: last_confirmed, last_fatalities = df[['ConfirmedCases', 'Fatalities']][df['Province_State']==province].values[-1] std_last_confirmed, std_last_fatalities = df[['ConfirmedCases', 'Fatalities']][df['Province_State']==province].values[-5:].std(axis=0) country = df['Country_Region'][df['Province_State']==province].values[0] qt_1, qt_100, qt_1_000 = df_data[['qt_days_since_1_case', 'qt_days_since_100_cases', 'qt_days_since_1000_cases']][df_data['Province_State']==province].values[-1:].squeeze() population = df_data['Population'][df_data['Province_State']==province].values[0] vec_country = countries_raw[dict_countries[country]] vector[i] = np.hstack([np.array([population, std_last_confirmed, std_last_fatalities, last_confirmed, last_fatalities, qt_1, qt_100, qt_1_000]), vec_country]) if scale: scaler = StandardScaler() vector = scaler.fit_transform(vector) return vector def uniVariateData(df, feature, window_x_size, window_y_size, train=True, return_all_series=False): num_series = df['Country_Province'].unique().shape[0] X = np.empty(( num_series, window_x_size, 1)) y = np.empty(( num_series, window_x_size, window_y_size)) if return_all_series: all_data = X = np.empty(( num_series, window_x_size + window_y_size)) unique_series = df_data['Country_Province'].unique() if return_all_series: for i, serie in tqdm(enumerate(unique_series)) : all_data[i] = df_data[feature][df_data['Country_Province']==serie].values[-(WINDOW_X_SIZE+WINDOW_Y_SIZE):] return all_data else: for i, serie in tqdm(enumerate(unique_series)) : data = df_data[feature][df_data['Country_Province']==serie].values if train: X[i] = data[:window_x_size].reshape(-1, 1) else: X[i] = data[-window_x_size:].reshape(-1, 1) if train: for step_ahead in range(1, window_y_size + 1): y[i, :, step_ahead - 1] = data[step_ahead:step_ahead + window_x_size] if train: return X, y else: return X def create_time_steps(length): return list(range(-length, 0)) def baseline(history): return np.mean(history[-1:]) def show_plot(plot_data, delta, title): labels = ['History', 'True Future', 'Model Prediction'] marker = ['.-', 'rx', 'go'] time_steps = create_time_steps(plot_data[0].shape[0]) if delta: future = delta else: future = 0 plt.title(title) for i, x in enumerate(plot_data): if i: plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i]) else: plt.plot(time_steps, plot_data[i].flatten() , marker[i], label=labels[i]) plt.legend() plt.xlim([time_steps[0],(future+5)*2]) plt.xlabel('Time-Step') plt.show() def multi_step_plot(history, true_future, prediction): plt.figure(figsize=(12, 6)) num_in = create_time_steps(len(history)) num_out = true_future.shape[0] plt.plot(num_in, np.array(history), label='History') plt.plot(np.arange(num_out)/1, np.array(true_future), 'bo', label='True Future') if prediction.any() : plt.plot(np.arange(num_out)/1, np.array(prediction), 'ro', label='Predicted Future') plt.legend(loc='upper left') plt.show() def testStepPlot(history, prediction): plt.figure(figsize=(12, 6)) num_in = create_time_steps(len(history)) num_out = prediction.shape[0] plt.plot(num_in, np.array(history), label='History') plt.plot(np.arange(num_out)/1, np.array(prediction), 'ro', label='Predicted Future') plt.legend(loc='upper left') plt.show() def lastTimeStepMse(y_true, y_pred): return mean_squared_error(y_true[: , -1], y_pred[:, 
-1]) def moovingAverage(array, size_window, weights=None): if weights: assert size_window==len(weights) new_array = np.empty(array.shape[0]) for i in range(size_window, array.shape[0]): if weights: new_array[i] = np.sum(array[i-size_window:i] * np.array(weights)) else: new_array[i] = np.mean(array[i-size_window:i]) return new_array def buildModel(lr=0.001, summary=False): input_country = Input(shape=[1], name='country') input_province = Input(shape=[1], name='province') input_confirmed_cases = Input(shape=[WINDOW_X_SIZE, 1], name='in_confirmedcases') input_fatalities = Input(shape=[WINDOW_X_SIZE, 1], name='in_fatalities') input_trend_confirmed_cases = Input(shape=[WINDOW_X_SIZE-1, 1], name='in_trend_confirmedcases') input_trend_fatalities = Input(shape=[WINDOW_X_SIZE-1, 1], name='in_trend_fatalities') input_delta_confirmed_cases = Input(shape=[WINDOW_X_SIZE-1, 1], name='in_delta_confirmedcases') input_delta_fatalities = Input(shape=[WINDOW_X_SIZE-1, 1], name='in_delta_fatalities') country_index = country_representation.shape[0] province_index = province_representation.shape[0] embedding_country = Embedding(country_index, country_representation.shape[1], weights=[country_representation], trainable=False )(input_country) embedding_province = Embedding(province_index, province_representation.shape[1], weights=[province_representation], trainable=False )(input_province) embeddings = concatenate([Flatten()(embedding_country), Flatten()(embedding_province)]) lstm_confirmed_cases = LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2 )(input_confirmed_cases) lstm_trend_confirmed_cases = LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2 )(input_trend_confirmed_cases) lstm_fatalities = LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2 )(input_fatalities) lstm_trend_fatalities = LSTM(64, return_sequences=True, dropout=0.2, recurrent_dropout=0.2 )(input_trend_fatalities) lstm_confirmed_cases = LSTM(64, return_sequences=False, dropout=0.2, recurrent_dropout=0.2 )(lstm_confirmed_cases) lstm_trend_confirmed_cases = LSTM(64, return_sequences=False, dropout=0.2, recurrent_dropout=0.2 )(lstm_trend_confirmed_cases) lstm_fatalities = LSTM(64, return_sequences=False, dropout=0.2, recurrent_dropout=0.2 )(lstm_fatalities) lstm_trend_fatalities = LSTM(64, return_sequences=False, dropout=0.2, recurrent_dropout=0.2 )(lstm_trend_fatalities) conf_cases = concatenate([lstm_confirmed_cases, lstm_trend_confirmed_cases, lstm_trend_fatalities, embeddings]) conf_cases = Dropout(0.4 )(conf_cases) conf_cases = Dense(128, activation='selu' )(conf_cases) conf_cases = Dropout(0.3 )(conf_cases) conf_cases = Dense(64, activation='selu' )(conf_cases) conf_cases = Dropout(0.2 )(conf_cases) fatalities = concatenate([lstm_fatalities, lstm_trend_fatalities, lstm_trend_confirmed_cases, embeddings]) fatalities = Dropout(0.4 )(fatalities) fatalities = Dense(128, activation='selu' )(fatalities) fatalities = Dropout(0.3 )(fatalities) fatalities = Dense(64, activation='selu' )(fatalities) fatalities = Dropout(0.2 )(fatalities) output_confirmed_cases = Dense(WINDOW_Y_SIZE, activation='relu', name='confirmed_cases' )(conf_cases) output_fatalities = Dense(WINDOW_Y_SIZE, activation='relu', name='fatalities' )(fatalities) model = Model(inputs=[input_country, input_province, input_confirmed_cases, input_fatalities, input_trend_confirmed_cases, input_trend_fatalities, input_delta_confirmed_cases, input_delta_fatalities], outputs=[output_confirmed_cases, output_fatalities]) 
model.compile(loss_weights=[1, 1], loss='mse', optimizer=Adam(learning_rate=lr, clipvalue=1.0), metrics=[lastTimeStepMse, 'mape']) if summary: print(model.summary()) return model <data_type_conversions>
categoricals = ["TeamName_1", "TeamName_2"] for c in categoricals: le = LabelEncoder() data[c] = data[c].fillna("NaN") data[c] = le.fit_transform(data[c]) test[c] = le.transform(test[c]) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
if PUBLIC_LEADERBOARD: df_data = df_data[df_data['Date']<='2020-03-18'] df_data['dt_datetime'] = pd.to_datetime(df_data['Date']) df_data['co_weekday'] = df_data.dt_datetime.dt.weekday df_test['dt_datetime'] = pd.to_datetime(df_test['Date']) df_test['co_weekday'] = df_test.dt_datetime.dt.weekday df_data, df_test, df_population = parseProvinces(df_data, df_test, df_population) df_data = df_data.merge(df_population[['Province.State', 'Country.Region', 'Population']], how='left', left_on=['Province_State', 'Country_Region'], right_on=['Province.State', 'Country.Region']) df_test = df_test.merge(df_population[['Province.State', 'Country.Region', 'Population']], how='left', left_on=['Province_State', 'Country_Region'], right_on=['Province.State', 'Country.Region']) df_data['Population'][df_data['Population'].isna() ] = df_data['Population'].median() df_test['Population'][df_test['Population'].isna() ] = df_test['Population'].median() df_data = getDaysElapsedSinceDeltaCases(df_data, ['qt_days_since_1_case', 'qt_days_since_100_cases', 'qt_days_since_1000_cases'], deltas=[1, 100, 1_000]) df_data['Country_Province'] = df_data.apply(lambda x: x['Province_State'] + '_' + x['Country_Region'], axis=1) df_data_original = df_data.copy() df_test_original = df_test.copy() dict_countries, dict_countries_inv = createCountryDicts(df_data) dict_provinces, dict_provinces_inv = createProvinceDicts(df_data) df_data['co_country'] = df_data['Country_Region'].apply(lambda x: dict_countries[x]) df_test['co_country'] = df_test['Country_Region'].apply(lambda x: dict_countries[x]) df_data['co_province'] = df_data['Province_State'].apply(lambda x: dict_provinces[x]) df_test['co_province'] = df_test['Province_State'].apply(lambda x: dict_provinces[x]) country_representation = getCountryRepresentation(df_data) province_representation = getProvinceRepresentation(df_data) df_data['ConfirmedCases'] = np.log1p(df_data['ConfirmedCases']) df_data['Fatalities'] = np.log1p(df_data['Fatalities']) data_confirmed_cases = uniVariateData(df_data, 'ConfirmedCases', WINDOW_X_SIZE, WINDOW_Y_SIZE, return_all_series=True) data_fatalities = uniVariateData(df_data, 'Fatalities', WINDOW_X_SIZE, WINDOW_Y_SIZE, return_all_series=True) X_confirmedcases = data_confirmed_cases[:, :WINDOW_X_SIZE] y_confirmedcases = data_confirmed_cases[:, WINDOW_X_SIZE:] X_fatalities = data_fatalities[:, :WINDOW_X_SIZE] y_fatalities = data_fatalities[:, WINDOW_X_SIZE:] X_countries = uniVariateData(df_data, 'co_country', WINDOW_X_SIZE, WINDOW_Y_SIZE, train=False) X_province = uniVariateData(df_data, 'co_province', WINDOW_X_SIZE, WINDOW_Y_SIZE, train=False) X_countries = np.expand_dims(X_countries[:, 0, 0], axis=1) X_province = np.expand_dims(X_province[:, 0, 0], axis=1) X_trend_confirmed_cases = np.expm1(X_confirmedcases[:, 1:])- np.expm1(X_confirmedcases[:, :-1]) X_trend_fatalities = np.expm1(X_fatalities[:, 1:])- np.expm1(X_fatalities[:, :-1]) for i in range(X_trend_confirmed_cases.shape[0]): X_trend_confirmed_cases[i] = moovingAverage(X_trend_confirmed_cases[i], size_window=7) X_trend_fatalities[i] = moovingAverage(X_trend_fatalities[i], size_window=7) X_delta_confirmed_cases = np.nan_to_num(( np.expm1(X_confirmedcases[:, 1:])- np.expm1(X_confirmedcases[:, :-1])) /np.expm1(X_confirmedcases[:, :-1]), nan=0, posinf=0) X_delta_fatalities = np.nan_to_num(( np.expm1(X_fatalities[:, 1:])- np.expm1(X_fatalities[:, :-1])) /np.expm1(X_fatalities[:, :-1]), nan=0, posinf=0) mean_series = X_confirmedcases.mean(axis=1) series_with_no_confimred_cases = {i for i, serie in 
enumerate(mean_series)if serie==0} series_with_confimred_cases = [i for i in range(X_confirmedcases.shape[0])if i not in series_with_no_confimred_cases] X_confirmedcases = np.expand_dims(X_confirmedcases, axis=2) X_fatalities = np.expand_dims(X_fatalities, axis=2) X_trend_confirmed_cases = np.expand_dims(X_trend_confirmed_cases, axis=2) X_trend_fatalities = np.expand_dims(X_trend_fatalities, axis=2) X_delta_confirmed_cases = np.expand_dims(X_delta_confirmed_cases, axis=2) X_delta_fatalities = np.expand_dims(X_delta_fatalities, axis=2) print(X_confirmedcases.mean() , y_confirmedcases.mean() , X_confirmedcases.std() , y_confirmedcases.std()) print(X_fatalities.mean() , y_fatalities.mean() , X_fatalities.std() , y_fatalities.std()) x_train = [X_countries[series_with_confimred_cases], X_province[series_with_confimred_cases], X_confirmedcases[series_with_confimred_cases], X_fatalities[series_with_confimred_cases], X_trend_confirmed_cases[series_with_confimred_cases], X_trend_fatalities[series_with_confimred_cases], X_delta_confirmed_cases[series_with_confimred_cases], X_delta_fatalities[series_with_confimred_cases]] y_train = [y_confirmedcases[series_with_confimred_cases], y_fatalities[series_with_confimred_cases]] <define_variables>
target = 'result' features = data.columns.values.tolist() features.remove(target )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
dt_ini = datetime.strptime(df_data_original.Date.max() , date_format)- timedelta(WINDOW_X_SIZE) dt_end = datetime.strptime(df_data_original.Date.max() , date_format) dt_max_test = df_test['Date'].max() X_test_confirmedcases = X_confirmedcases X_test_fatalities = X_fatalities X_test_countries = X_countries X_test_province = X_province X_test_trend_confirmed_cases = X_trend_confirmed_cases X_test_trend_fatalities = X_trend_fatalities X_test_delta_confirmed_cases = X_delta_confirmed_cases X_test_delta_fatalities = X_delta_fatalities i = 0 X_final_confirmedcases = X_test_confirmedcases X_final_fatalities = X_test_fatalities while dt_end.strftime(date_format)< dt_max_test: print('==='*20) print(f'dt_ini: {dt_ini.strftime(date_format)}') print(f'dt_end: {dt_end.strftime(date_format)}') print('==='*20) x_test = [X_test_countries, X_test_province, X_test_confirmedcases, X_test_fatalities, X_test_trend_confirmed_cases, X_test_trend_fatalities, X_delta_confirmed_cases, X_delta_fatalities] y_test_predictions = model.predict(x_test) y_pred_confirmedcases_unscaled = y_test_predictions[0] y_pred_fatalities_unscaled = y_test_predictions[1] y_pred_confirmedcases_unscaled[list(series_with_no_confimred_cases)] = 0 y_pred_fatalities_unscaled[list(series_with_no_confimred_cases)] = 0 if i==0: X_final_confirmedcases = np.concatenate([X_final_confirmedcases.squeeze() , y_pred_confirmedcases_unscaled], axis=1) X_final_fatalities = np.concatenate([X_final_fatalities.squeeze() , y_pred_fatalities_unscaled], axis=1) else: X_final_confirmedcases = np.concatenate([X_final_confirmedcases, y_pred_confirmedcases_unscaled], axis=1) X_final_fatalities = np.concatenate([X_final_fatalities, y_pred_fatalities_unscaled], axis=1) X_test_confirmedcases = np.expand_dims(X_final_confirmedcases[: ,-WINDOW_X_SIZE:], axis=2) X_test_fatalities = np.expand_dims(X_final_fatalities[:, -WINDOW_X_SIZE:], axis=2) X_test_trend_confirmed_cases = np.expm1(X_test_confirmedcases[:, 1:])- np.expm1(X_test_confirmedcases[:, :-1]) X_test_trend_fatalities = np.expm1(X_test_fatalities[:, 1:])- np.expm1(X_test_fatalities[:, :-1]) for i in range(X_trend_confirmed_cases.shape[0]): X_test_trend_confirmed_cases[i] = np.expand_dims(moovingAverage(X_test_trend_confirmed_cases[i], size_window=5), axis=1) X_test_trend_fatalities[i] = np.expand_dims(moovingAverage(X_test_trend_fatalities[i], size_window=5), axis=1) X_test_delta_confirmed_cases = np.nan_to_num(( np.expm1(X_test_confirmedcases[:, 1:])- np.expm1(X_test_confirmedcases[:, :-1])) /np.expm1(X_test_confirmedcases[:, :-1]), nan=0, posinf=0) X_test_delta_fatalities = np.nan_to_num(( np.expm1(X_test_fatalities[:, 1:])- np.expm1(X_test_fatalities[:, :-1])) /np.expm1(X_test_fatalities[:, :-1]), nan=0, posinf=0) dt_ini += timedelta(WINDOW_Y_SIZE) dt_end += timedelta(WINDOW_Y_SIZE) i += 1 <merge>
nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
df_test = pd.merge(df_test_original, df_data[['Province_State', 'Country_Region', 'Date', 'ConfirmedCases', 'Fatalities']], how='left', on=['Province_State', 'Country_Region', 'Date']) df_test['ConfirmedCases'][df_test['Date']>df_data['Date'].max() ] = X_final_confirmedcases[:, WINDOW_X_SIZE:].flatten() df_test['Fatalities'][df_test['Date']>df_data['Date'].max() ] = X_final_fatalities[:, WINDOW_X_SIZE:].flatten() submission = df_test[['ForecastId', 'ConfirmedCases', 'Fatalities']][df_test.Date>=df_test.Date.min() ].reset_index(drop = True) if not PUBLIC_LEADERBOARD: assert submission.shape[0] == df_sample_sub.shape[0] assert submission.shape[1] == df_sample_sub.shape[1] submission['ConfirmedCases'] = np.expm1(submission['ConfirmedCases']) submission['Fatalities'] = np.expm1(submission['Fatalities']) print(submission.describe()) submission.to_csv(path_output + FILE_NAME, sep=',', index=False, header=True) <compute_test_metric>
lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
def RMSLE(pred,actual): return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)) )<load_from_csv>
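A quick sanity check of the RMSLE helper above, restated with illustrative arrays (the values are made up, not notebook output):

import numpy as np

def RMSLE(pred, actual):
    # same metric as the cell above: root mean squared logarithmic error
    return np.sqrt(np.mean(np.power(np.log(pred + 1) - np.log(actual + 1), 2)))

pred = np.array([100.0, 10.0, 0.0])    # hypothetical predictions
actual = np.array([110.0, 8.0, 0.0])   # hypothetical ground truth
print(RMSLE(pred, actual))             # ~0.13; returns 0.0 when pred == actual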
catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10, cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-2/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-2/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering>
submission_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv') submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred submission_df
Google Cloud & NCAA® ML Competition 2020-NCAAW
8,192,805
<compute_test_metric><EOS>
submission_df.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAW
5,248,655
<SOS> metric: OpenImagesObjDetectionSegmentationAP Kaggle data source: open-images-2019-instance-segmentation<load_from_csv>
import numpy as np import pandas as pd import os import sys from tqdm import tqdm from pathlib import Path import tensorflow as tf import skimage.io import matplotlib.pyplot as plt
Open Images 2019 - Instance Segmentation
5,248,655
PATH_WEEK2='/kaggle/input/covid19-global-forecasting-week-2' df_train = pd.read_csv(f'{PATH_WEEK2}/train.csv') df_test = pd.read_csv(f'{PATH_WEEK2}/test.csv') df_train.head() df_test.head() df_train.rename(columns={'Country_Region':'Country'}, inplace=True) df_test.rename(columns={'Country_Region':'Country'}, inplace=True) df_train.rename(columns={'Province_State':'State'}, inplace=True) df_test.rename(columns={'Province_State':'State'}, inplace=True) df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True) df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True) df_train.info() df_test.info() y1_Train = df_train.iloc[:, -2] y1_Train.head() y2_Train = df_train.iloc[:, -1] y2_Train.head() EMPTY_VAL = "EMPTY_VAL" def fillState(state, country): if state == EMPTY_VAL: return country return state <data_type_conversions>
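A minimal illustration of the fillState helper above, restated with made-up sample values; it simply falls back to the country name when the state is the EMPTY_VAL placeholder:

EMPTY_VAL = "EMPTY_VAL"

def fillState(state, country):
    # fall back to the country name when the state is the placeholder
    return country if state == EMPTY_VAL else state

print(fillState(EMPTY_VAL, "Italy"))   # 'Italy'
print(fillState("Hubei", "China"))     # 'Hubei'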
!git clone https://www.github.com/matterport/Mask_RCNN.git os.chdir('Mask_RCNN') !rm -rf .git !rm -rf images assets
Open Images 2019 - Instance Segmentation
5,248,655
X_Train = df_train.copy() X_Train['State'].fillna(EMPTY_VAL, inplace=True) X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1) X_Train.loc[:, 'Date'] = X_Train.Date.dt.strftime("%m%d") X_Train["Date"] = X_Train["Date"].astype(int) X_Train.head() X_Test = df_test.copy() X_Test['State'].fillna(EMPTY_VAL, inplace=True) X_Test['State'] = X_Test.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1) X_Test.loc[:, 'Date'] = X_Test.Date.dt.strftime("%m%d") X_Test["Date"] = X_Test["Date"].astype(int) X_Test.head() <categorify>
DATA_DIR = Path('/kaggle/input') ROOT_DIR = Path('/kaggle/working' )
Open Images 2019 - Instance Segmentation
5,248,655
le = preprocessing.LabelEncoder() X_Train.Country = le.fit_transform(X_Train.Country) X_Train['State'] = le.fit_transform(X_Train['State']) X_Train.head() X_Test.Country = le.fit_transform(X_Test.Country) X_Test['State'] = le.fit_transform(X_Test['State']) X_Test.head() df_train.head() df_train.loc[df_train.Country == 'Afghanistan', :] df_test.tail() <categorify>
sys.path.append(ROOT_DIR/'Mask_RCNN' )
Open Images 2019 - Instance Segmentation
5,248,655
filterwarnings('ignore') le = preprocessing.LabelEncoder() countries = X_Train.Country.unique() df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) for country in countries: states = X_Train.loc[X_Train.Country == country, :].State.unique() for state in states: X_Train_CS = X_Train.loc[(X_Train.Country == country)&(X_Train.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']] y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases'] y2_Train_CS = X_Train_CS.loc[:, 'Fatalities'] X_Train_CS = X_Train_CS.loc[:, ['State', 'Country', 'Date']] X_Train_CS.Country = le.fit_transform(X_Train_CS.Country) X_Train_CS['State'] = le.fit_transform(X_Train_CS['State']) X_Test_CS = X_Test.loc[(X_Test.Country == country)&(X_Test.State == state), ['State', 'Country', 'Date', 'ForecastId']] X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId'] X_Test_CS = X_Test_CS.loc[:, ['State', 'Country', 'Date']] X_Test_CS.Country = le.fit_transform(X_Test_CS.Country) X_Test_CS['State'] = le.fit_transform(X_Test_CS['State']) model1 = XGBRegressor(n_estimators=1000) model1.fit(X_Train_CS, y1_Train_CS) y1_pred = model1.predict(X_Test_CS) model2 = XGBRegressor(n_estimators=1000) model2.fit(X_Train_CS, y2_Train_CS) y2_pred = model2.predict(X_Test_CS) df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred}) df_out = pd.concat([df_out, df], axis=0) <save_to_csv>
!pip install pycocotools
Open Images 2019 - Instance Segmentation
5,248,655
df_out.ForecastId = df_out.ForecastId.astype('int') df_out.tail() df_out.to_csv('submission.csv', index=False )<import_modules>
from mrcnn.config import Config from mrcnn import utils import mrcnn.model as modellib from mrcnn import visualize from mrcnn.model import log
Open Images 2019 - Instance Segmentation
5,248,655
print("Read in libraries") <load_from_csv>
!wget https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5
Open Images 2019 - Instance Segmentation
5,248,655
PATH_WEEK2='/kaggle/input/covid19-global-forecasting-week-2' df_train = pd.read_csv(f'{PATH_WEEK2}/train.csv') df_test = pd.read_csv(f'{PATH_WEEK2}/test.csv') df_train.head() df_test.head() df_train.rename(columns={'Country_Region':'Country'}, inplace=True) df_test.rename(columns={'Country_Region':'Country'}, inplace=True) df_train.rename(columns={'Province_State':'State'}, inplace=True) df_test.rename(columns={'Province_State':'State'}, inplace=True) df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True) df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True) df_train.info() df_test.info() y1_Train = df_train.iloc[:, -2] y1_Train.head() y2_Train = df_train.iloc[:, -1] y2_Train.head() EMPTY_VAL = "EMPTY_VAL" def fillState(state, country): if state == EMPTY_VAL: return country return state <data_type_conversions>
COCO_MODEL_PATH = 'mask_rcnn_coco.h5'
Open Images 2019 - Instance Segmentation
5,248,655
X_Train = df_train.copy() X_Train['State'].fillna(EMPTY_VAL, inplace=True) X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1) X_Train.loc[:, 'Date'] = X_Train.Date.dt.strftime("%m%d") X_Train["Date"] = X_Train["Date"].astype(int) X_Train.head() X_Test = df_test.copy() X_Test['State'].fillna(EMPTY_VAL, inplace=True) X_Test['State'] = X_Test.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1) X_Test.loc[:, 'Date'] = X_Test.Date.dt.strftime("%m%d") X_Test["Date"] = X_Test["Date"].astype(int) X_Test.head() <categorify>
class InferenceConfig(coco.CocoConfig): GPU_COUNT = 1 IMAGES_PER_GPU = 1 IMAGE_MIN_DIM = 256 IMAGE_MAX_DIM = 256 config = InferenceConfig() config.display()
Open Images 2019 - Instance Segmentation
5,248,655
le = preprocessing.LabelEncoder() X_Train.Country = le.fit_transform(X_Train.Country) X_Train['State'] = le.fit_transform(X_Train['State']) X_Train.head() X_Test.Country = le.fit_transform(X_Test.Country) X_Test['State'] = le.fit_transform(X_Test['State']) X_Test.head() df_train.head() df_train.loc[df_train.Country == 'Afghanistan', :] df_test.tail() <categorify>
model = modellib.MaskRCNN(mode="inference", config=config, model_dir=ROOT_DIR) model.load_weights(COCO_MODEL_PATH, by_name=True )
Open Images 2019 - Instance Segmentation
5,248,655
filterwarnings('ignore') le = preprocessing.LabelEncoder() countries = X_Train.Country.unique() df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []}) for country in countries: states = X_Train.loc[X_Train.Country == country, :].State.unique() for state in states: X_Train_CS = X_Train.loc[(X_Train.Country == country)&(X_Train.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']] y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases'] y2_Train_CS = X_Train_CS.loc[:, 'Fatalities'] X_Train_CS = X_Train_CS.loc[:, ['State', 'Country', 'Date']] X_Train_CS.Country = le.fit_transform(X_Train_CS.Country) X_Train_CS['State'] = le.fit_transform(X_Train_CS['State']) X_Test_CS = X_Test.loc[(X_Test.Country == country)&(X_Test.State == state), ['State', 'Country', 'Date', 'ForecastId']] X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId'] X_Test_CS = X_Test_CS.loc[:, ['State', 'Country', 'Date']] X_Test_CS.Country = le.fit_transform(X_Test_CS.Country) X_Test_CS['State'] = le.fit_transform(X_Test_CS['State']) model1 = XGBRegressor(n_estimators=1000) model1.fit(X_Train_CS, y1_Train_CS) y1_pred = model1.predict(X_Test_CS) model2 = XGBRegressor(n_estimators=1000) model2.fit(X_Train_CS, y2_Train_CS) y2_pred = model2.predict(X_Test_CS) df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred}) df_out = pd.concat([df_out, df], axis=0) <save_to_csv>
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
Open Images 2019 - Instance Segmentation
5,248,655
df_out.ForecastId = df_out.ForecastId.astype('int') df_out.tail() df_out.to_csv('submission.csv', index=False )<import_modules>
IMAGE_DIR = "/kaggle/input/test/"
Open Images 2019 - Instance Segmentation
5,248,655
idx = pd.IndexSlice<load_from_csv>
class_lookup_df = pd.read_csv("./challenge-2019-classes-description-segmentable.csv", header=None) empty_submission_df = pd.read_csv("input/sample_empty_submission.csv" )
Open Images 2019 - Instance Segmentation
5,248,655
df = pd.read_csv('../input/covid19-global-forecasting-week-2/train.csv', index_col=0 )<filter>
class_lookup_df.columns = ["encoded_label","label"] class_lookup_df['label'] = class_lookup_df['label'].str.lower() class_lookup_df.head()
Open Images 2019 - Instance Segmentation
5,248,655
df['Country_Region'].replace('Taiwan*', 'Taiwan', inplace=True) df = df[df['Country_Region'] != 'Diamond Princess']<load_from_csv>
def encode_binary_mask(mask: np.ndarray)-> t.Text: if mask.dtype != np.bool: raise ValueError("encode_binary_mask expects a binary mask, received dtype == %s" % mask.dtype) mask = np.squeeze(mask) if len(mask.shape)!= 2: raise ValueError("encode_binary_mask expects a 2d mask, received shape == %s" % mask.shape) mask_to_encode = mask.reshape(mask.shape[0], mask.shape[1], 1) mask_to_encode = mask_to_encode.astype(np.uint8) mask_to_encode = np.asfortranarray(mask_to_encode) encoded_mask = coco_mask.encode(mask_to_encode)[0]["counts"] binary_str = zlib.compress(encoded_mask, zlib.Z_BEST_COMPRESSION) base64_str = base64.b64encode(binary_str) return base64_str
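A hedged usage sketch of the same encoding pipeline as encode_binary_mask above, assuming pycocotools is installed (as in the pip install cell earlier); the toy mask is illustrative only:

import base64, zlib
import numpy as np
from pycocotools import mask as coco_mask

# toy 2-D boolean mask (illustrative, not competition data)
mask = np.zeros((4, 4), dtype=bool)
mask[1:3, 1:3] = True

# same pipeline as encode_binary_mask above: COCO RLE -> zlib -> base64
rle = coco_mask.encode(np.asfortranarray(mask.astype(np.uint8).reshape(4, 4, 1)))[0]["counts"]
encoded = base64.b64encode(zlib.compress(rle, zlib.Z_BEST_COMPRESSION))
print(encoded.decode())   # compact ASCII token used in the PredictionString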
Open Images 2019 - Instance Segmentation
5,248,655
df_codes =(pd.read_csv('../input/my-covid19-dataset/iso-country-codes.csv') .rename(columns={'Country Name': 'Country_Region', 'Alpha-3 code': 'Country Code'}) .loc[:,['Country_Region', 'Country Code']]) df =(pd.merge(df, df_codes, how='left', on='Country_Region'))<feature_engineering>
ImageID_list = [] ImageWidth_list = [] ImageHeight_list = [] PredictionString_list = [] for num, row in tqdm(empty_submission_df.iterrows() , total=len(empty_submission_df)) : filename = row["ImageID"] + ".jpg" image = skimage.io.imread(os.path.join(IMAGE_DIR, filename)) results = model.detect([image]) r = results[0] height = image.shape[0] width = image.shape[1] PredictionString = "" for i in range(len(r["class_ids"])) : class_id = r["class_ids"][i] roi = r["rois"][i] mask = r["masks"][:,:,i] confidence = r["scores"][i] encoded_mask = encode_binary_mask(mask) labelname = class_names[r['class_ids'][i]] if class_lookup_df[class_lookup_df["label"] == labelname].shape[0] == 0: continue encoded_label = class_lookup_df[class_lookup_df["label"] == labelname]["encoded_label"].item() PredictionString += encoded_label PredictionString += " " PredictionString += str(confidence) PredictionString += " " PredictionString += encoded_mask.decode() PredictionString += " " ImageID_list.append(row["ImageID"]) ImageWidth_list.append(width) ImageHeight_list.append(height) PredictionString_list.append(PredictionString )
Open Images 2019 - Instance Segmentation
5,248,655
def location(state, country): if type(state)==str and not state==country: return country + ' - ' + state else: return country df['Date'] = df['Date'].apply(lambda x:(dt.datetime.strptime(x, '%Y-%m-%d'))) df['Location'] = df[['Province_State', 'Country_Region']].apply(lambda row: location(row[0],row[1]), axis=1) t_start = df['Date'].unique() [0] t_end = df['Date'].unique() [-1] print('Number of Locations: ' + str(len(df['Location'].unique()))) print('Dates: from ' + np.datetime_as_string(t_start, unit='D')+ ' to ' + np.datetime_as_string(t_end, unit='D')+ ' ' )<filter>
results=pd.DataFrame({"ImageID":ImageID_list, "ImageWidth":ImageWidth_list, "ImageHeight":ImageHeight_list, "PredictionString":PredictionString_list } )
Open Images 2019 - Instance Segmentation
5,248,655
<load_from_csv><EOS>
results.to_csv("submission.csv", index=False )
Open Images 2019 - Instance Segmentation
9,269,178
<SOS> metric: rmse Kaggle data source: find-me-that-fish<split>
%matplotlib inline pd.set_option('max_colwidth',250) pd.set_option('max_columns',250) pd.set_option('max_rows',500 )
Find me that fish
9,269,178
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=99 )<choose_model_class>
train = pd.read_csv('../input/duth-dbirlab2-1/train.csv') test = pd.read_csv('../input/duth-dbirlab2-1/test.csv' )
Find me that fish
9,269,178
n_neighbors = 10 knn = KNeighborsRegressor(n_neighbors=n_neighbors, weights='uniform', metric='minkowski', p=2 )<choose_model_class>
for df in [train,test]: for c in df.drop(['obs_id'],axis=1): if(df[c].dtype=='object'): lbl = LabelEncoder() lbl.fit(list(df[c].values)) df[c] = lbl.transform(list(df[c].values))
Find me that fish
9,269,178
radius = 1.0 rnn = RadiusNeighborsRegressor(radius=radius, weights='uniform', metric='minkowski', p=2 )<choose_model_class>
ntrain = train.shape[0] ntest = test.shape[0] SEED = 11 NFOLDS = 5 folds = KFold(n_splits= NFOLDS, random_state=SEED, shuffle=True )
Find me that fish
9,269,178
lnr = LinearRegression(copy_X=True, fit_intercept=False )<predict_on_test>
cols_to_exclude = ['obs_id','Overall Probability'] df_train_columns = [c for c in train.columns if c not in cols_to_exclude] y_train = train['Overall Probability'].ravel() x_train = train[df_train_columns].values x_test = test[df_train_columns].values
Find me that fish
9,269,178
lnr.fit(X_train,y_train) y_pred = lnr.predict(X_test) score = r2_score(y_test, y_pred, multioutput='uniform_average') print('Coefficient of determination of the linear regression: {:.2%}'.format(score))<choose_model_class>
def train_model(X_train, X_test, Y_train, folds=5, model_type='lgb',plot_feature_importance=True): oof = np.zeros(ntrain) prediction = np.zeros(ntest) scores = [] feature_importance = pd.DataFrame() for fold_n,(train_index, valid_index)in enumerate(folds.split(X_train,Y_train)) : print('Fold', fold_n+1, 'started at', time.ctime()) x_train, x_valid = X_train[train_index], X_train[valid_index] y_train, y_valid = Y_train[train_index], Y_train[valid_index] if model_type == 'ridge': model = linear_model.Ridge(alpha=.5) model.fit(x_train, y_train) y_pred_valid = model.predict(x_valid) y_pred = model.predict(X_test) if model_type == 'linear': model = LinearRegression() model.fit(x_train, y_train) y_pred_valid = model.predict(x_valid) y_pred = model.predict(X_test) if model_type == 'rf': model = RandomForestRegressor(min_weight_fraction_leaf=0.05,n_jobs=-2,random_state=0, max_depth=4, n_estimators=100) model.fit(x_train, y_train) y_pred_valid = model.predict(x_valid) y_pred = model.predict(X_test) if model_type == 'lgb': lgb_params = { 'num_leaves': 7, 'min_data_in_leaf': 20, 'min_sum_hessian_in_leaf': 11, 'objective': 'regression', 'max_depth': 6, 'learning_rate': 0.05, 'boosting': "gbdt", 'feature_fraction': 0.8, 'feature_fraction_seed': 9, 'max_bin ': 55, "bagging_freq": 5, "bagging_fraction": 0.8, "bagging_seed": 9, 'metric': 'rmse', 'lambda_l1': 0.1, 'verbosity': -1, 'min_child_weight': 5.34, 'reg_alpha': 1.130, 'reg_lambda': 0.360, 'subsample': 0.8, } model = lgb.LGBMRegressor(**lgb_params, n_estimators = 20000, n_jobs = -1) model.fit(x_train, y_train, eval_set=[(x_train, y_train),(x_valid, y_valid)], eval_metric='rmse',verbose=10000, early_stopping_rounds=100) y_pred_valid = model.predict(x_valid) y_pred_valid = np.clip(y_pred_valid, a_min=0, a_max=1) y_pred = model.predict(X_test, num_iteration=model.best_iteration_) y_pred = np.clip(y_pred, a_min=0, a_max=1) fold_importance = pd.DataFrame() fold_importance["feature"] = train[df_train_columns].columns fold_importance["importance"] = model.feature_importances_ fold_importance["fold"] = fold_n + 1 feature_importance = pd.concat([feature_importance, fold_importance], axis=0) oof[valid_index] = y_pred_valid.reshape(-1,) scores.append(mean_squared_error(y_valid, y_pred_valid)** 0.5) prediction += y_pred if(model_type == 'lgb' and plot_feature_importance==True): cols = feature_importance[["feature", "importance"]].groupby("feature" ).mean().sort_values( by="importance", ascending=False)[:50].index best_features = feature_importance.loc[feature_importance.feature.isin(cols)] plt.figure(figsize=(16, 12)) ; sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)) ; plt.title('LGB Features(avg over folds)') prediction /= NFOLDS print('CV mean score: {0:.5f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores))) return oof, prediction
Find me that fish
9,269,178
rdg = RidgeCV(alphas=[10**n for n in range(-4,4)] )<predict_on_test>
oof, prediction = train_model(X_train=x_train, X_test=x_test, Y_train=y_train, folds=folds, model_type='rf', plot_feature_importance=True )
Find me that fish
9,269,178
<predict_on_test><EOS>
sample_submission = pd.read_csv('../input/duth-dbirlab2-1/sample_submission.csv') sub_df = pd.DataFrame({"obs_id":sample_submission["obs_id"].values}) sub_df["Overall Probability"] = prediction sub_df["Overall Probability"] = sub_df["Overall Probability"].apply(lambda x: 1 if x>1 else 0 if x<0 else x) sub_df.to_csv("submission.csv", index=False )
Find me that fish
10,784,932
<SOS> metric: custom metric Kaggle data source: higgs-boson-machine-learning-challenge<set_options>
warnings.filterwarnings("ignore")
Higgs Boson Machine Learning Challenge
10,784,932
%matplotlib inline InteractiveShell.ast_node_interactivity = "all" pd.set_option('display.max_columns', 99) pd.set_option('display.max_rows', 99) <set_options>
train = pd.read_csv('../input/higgs-boson/training.zip') test = pd.read_csv('../input/higgs-boson/test.zip') print(train.shape,test.shape )
Higgs Boson Machine Learning Challenge
10,784,932
plt.rcParams['figure.figsize'] = [16, 10] plt.rcParams['font.size'] = 14 sns.set_palette(sns.color_palette('tab20', 20)) <load_from_csv>
train = train.drop(['Weight'], axis=1 )
Higgs Boson Machine Learning Challenge
10,784,932
COMP = '.. /input/covid19-global-forecasting-week-2' DATEFORMAT = '%Y-%m-%d' def get_comp_data(COMP): train = pd.read_csv(f'{COMP}/train.csv') test = pd.read_csv(f'{COMP}/test.csv') submission = pd.read_csv(f'{COMP}/submission.csv') print(train.shape, test.shape, submission.shape) train['Country_Region'] = train['Country_Region'].str.replace(',', '') test['Country_Region'] = test['Country_Region'].str.replace(',', '') train['Location'] = train['Country_Region'] + '-' + train['Province_State'].fillna('') test['Location'] = test['Country_Region'] + '-' + test['Province_State'].fillna('') train['LogConfirmed'] = to_log(train.ConfirmedCases) train['LogFatalities'] = to_log(train.Fatalities) train = train.drop(columns=['Province_State']) test = test.drop(columns=['Province_State']) country_codes = pd.read_csv('.. /input/covid19-metadata/country_codes.csv', keep_default_na=False) train = train.merge(country_codes, on='Country_Region', how='left') test = test.merge(country_codes, on='Country_Region', how='left') train['DateTime'] = pd.to_datetime(train['Date']) test['DateTime'] = pd.to_datetime(test['Date']) return train, test, submission def process_each_location(df): dfs = [] for loc, df in tqdm(df.groupby('Location')) : df = df.sort_values(by='Date') df['Fatalities'] = df['Fatalities'].cummax() df['ConfirmedCases'] = df['ConfirmedCases'].cummax() df['LogFatalities'] = df['LogFatalities'].cummax() df['LogConfirmed'] = df['LogConfirmed'].cummax() df['LogConfirmedNextDay'] = df['LogConfirmed'].shift(-1) df['ConfirmedNextDay'] = df['ConfirmedCases'].shift(-1) df['DateNextDay'] = df['Date'].shift(-1) df['LogFatalitiesNextDay'] = df['LogFatalities'].shift(-1) df['FatalitiesNextDay'] = df['Fatalities'].shift(-1) df['LogConfirmedDelta'] = df['LogConfirmedNextDay'] - df['LogConfirmed'] df['ConfirmedDelta'] = df['ConfirmedNextDay'] - df['ConfirmedCases'] df['LogFatalitiesDelta'] = df['LogFatalitiesNextDay'] - df['LogFatalities'] df['FatalitiesDelta'] = df['FatalitiesNextDay'] - df['Fatalities'] dfs.append(df) return pd.concat(dfs) def add_days(d, k): return dt.datetime.strptime(d, DATEFORMAT)+ dt.timedelta(days=k) def to_log(x): return np.log(x + 1) def to_exp(x): return np.exp(x)- 1 <save_to_csv>
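The to_log/to_exp helpers above are the usual log1p/expm1 pair; a small sketch, with illustrative counts, confirming that they round-trip:

import numpy as np

def to_log(x):
    # same as the helper above; equivalent to np.log1p(x)
    return np.log(x + 1)

def to_exp(x):
    # inverse transform; equivalent to np.expm1(x)
    return np.exp(x) - 1

x = np.array([0.0, 1.0, 100.0, 10000.0])   # illustrative case counts
print(np.allclose(to_exp(to_log(x)), x))   # True: the pair round-trips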
enc = LabelEncoder() train['Label'] = enc.fit_transform(train['Label']) train.head()
Higgs Boson Machine Learning Challenge
10,784,932
train = train.sort_values(by='Date') countries_latest_state = train[train['Date'] == TRAIN_END].groupby([ 'Country_Region', 'continent', 'geo_region', 'country_iso_code_3'] ).sum() [[ 'ConfirmedCases', 'Fatalities']].reset_index() countries_latest_state['Log10Confirmed'] = np.log10(countries_latest_state.ConfirmedCases + 1) countries_latest_state['Log10Fatalities'] = np.log10(countries_latest_state.Fatalities + 1) countries_latest_state = countries_latest_state.sort_values(by='Fatalities', ascending=False) countries_latest_state.to_csv('countries_latest_state.csv', index=False) countries_latest_state.shape countries_latest_state.head()<merge>
y = train["Label"] X = train X_test = test
Higgs Boson Machine Learning Challenge
10,784,932
latest_loc = train[train['Date'] == TRAIN_END][['Location', 'ConfirmedCases', 'Fatalities']] max_loc = train.groupby(['Location'])[['ConfirmedCases', 'Fatalities']].max().reset_index() check = pd.merge(latest_loc, max_loc, on='Location') np.mean(check.ConfirmedCases_x == check.ConfirmedCases_y) np.mean(check.Fatalities_x == check.Fatalities_y) check[check.Fatalities_x != check.Fatalities_y] check[check.ConfirmedCases_x != check.ConfirmedCases_y]<feature_engineering>
X.set_index(['EventId'],inplace = True) X_test.set_index(['EventId'],inplace = True) X = X.drop(['Label'], axis=1) X.head()
Higgs Boson Machine Learning Challenge
10,784,932
regional_progress = train_clean.groupby(['DateTime', 'continent'] ).sum() [['ConfirmedCases', 'Fatalities']].reset_index() regional_progress['Log10Confirmed'] = np.log10(regional_progress.ConfirmedCases + 1) regional_progress['Log10Fatalities'] = np.log10(regional_progress.Fatalities + 1) regional_progress = regional_progress[regional_progress.continent != '<merge>
X = normalize(X) X_test = normalize(X_test )
Higgs Boson Machine Learning Challenge
10,784,932
countries_0301 = country_progress[country_progress.Date == '2020-03-01'][[ 'Country_Region', 'ConfirmedCases', 'Fatalities']] countries_0331 = country_progress[country_progress.Date == '2020-03-31'][[ 'Country_Region', 'ConfirmedCases', 'Fatalities']] countries_in_march = pd.merge(countries_0301, countries_0331, on='Country_Region', suffixes=['_0301', '_0331']) countries_in_march['IncreaseInMarch'] = countries_in_march.ConfirmedCases_0331 /(countries_in_march.ConfirmedCases_0301 + 1) countries_in_march = countries_in_march[countries_in_march.ConfirmedCases_0331 > 200].sort_values( by='IncreaseInMarch', ascending=False) countries_in_march.tail(15 )<save_to_csv>
Higgs Boson Machine Learning Challenge
10,784,932
train_clean['Geo latest = train_clean[train_clean.Date == '2020-03-31'][[ 'Geo daily_confirmed_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_confirmed_deltas = latest.merge(daily_confirmed_deltas, on='Geo daily_confirmed_deltas.shape daily_confirmed_deltas.head() daily_confirmed_deltas.to_csv('daily_confirmed_deltas.csv', index=False )<save_to_csv>
kf = KFold(n_splits=5, random_state=2020, shuffle=True) for train_index, val_index in kf.split(X): print("TRAIN:", train_index, "TEST:", val_index) X_train, X_val = X[train_index], X[val_index] y_train, y_val = y[train_index], y[val_index]
Higgs Boson Machine Learning Challenge
10,784,932
deltas = train_clean[np.logical_and( train_clean.LogConfirmed > 2, ~train_clean.Location.str.startswith('China') )].dropna().sort_values(by='LogConfirmedDelta', ascending=False) deltas['start'] = deltas['LogConfirmed'].round(0) confirmed_deltas = pd.concat([ deltas.groupby('start')[['LogConfirmedDelta']].mean() , deltas.groupby('start')[['LogConfirmedDelta']].std() , deltas.groupby('start')[['LogConfirmedDelta']].count() ], axis=1) deltas.mean() confirmed_deltas.columns = ['avg', 'std', 'cnt'] confirmed_deltas confirmed_deltas.to_csv('confirmed_deltas.csv' )<feature_engineering>
Higgs Boson Machine Learning Challenge
10,784,932
DECAY = 0.93 DECAY ** 7, DECAY ** 14, DECAY ** 21, DECAY ** 28 confirmed_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 confirmed_deltas['DELTA'] = GLOBAL_DELTA confirmed_deltas.loc[confirmed_deltas.continent=='Africa', 'DELTA'] = 0.14 confirmed_deltas.loc[confirmed_deltas.continent=='Oceania', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Korea South', 'DELTA'] = 0.011 confirmed_deltas.loc[confirmed_deltas.Country_Region=='US', 'DELTA'] = 0.15 confirmed_deltas.loc[confirmed_deltas.Country_Region=='China', 'DELTA'] = 0.01 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Japan', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Norway', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iceland', 'DELTA'] = 0.05 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Austria', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Italy', 'DELTA'] = 0.04 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Spain', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Portugal', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Israel', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Iran', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Germany', 'DELTA'] = 0.07 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.06 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Russia', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Brazil', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Turkey', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Country_Region=='Philippines', 'DELTA'] = 0.18 confirmed_deltas.loc[confirmed_deltas.Location=='France-', 'DELTA'] = 0.1 confirmed_deltas.loc[confirmed_deltas.Location=='United Kingdom-', 'DELTA'] = 0.12 confirmed_deltas.loc[confirmed_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 confirmed_deltas.loc[confirmed_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.08 confirmed_deltas.loc[confirmed_deltas.Location=='San Marino-', 'DELTA'] = 0.03 confirmed_deltas.shape, confirmed_deltas.DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].shape, confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() confirmed_deltas[confirmed_deltas.DELTA != GLOBAL_DELTA] confirmed_deltas.describe()<save_to_csv>
X_train = X_train.reshape(-1, 1, 30) X_val = X_val.reshape(-1, 1, 30) y_train = y_train.values y_train = y_train.reshape(-1, 1,) y_val = y_val.values y_val = y_val.reshape(-1, 1, )
Higgs Boson Machine Learning Challenge
10,784,932
daily_log_confirmed = train_clean.pivot('Location', 'Date', 'LogConfirmed' ).reset_index() daily_log_confirmed = daily_log_confirmed.sort_values(TRAIN_END, ascending=False) daily_log_confirmed.to_csv('daily_log_confirmed.csv', index=False) for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))) : new_day = str(d ).split(' ')[0] last_day = dt.datetime.strptime(new_day, DATEFORMAT)- dt.timedelta(days=1) last_day = last_day.strftime(DATEFORMAT) for loc in confirmed_deltas.Location.values: confirmed_delta = confirmed_deltas.loc[confirmed_deltas.Location == loc, 'DELTA'].values[0] daily_log_confirmed.loc[daily_log_confirmed.Location == loc, new_day] = daily_log_confirmed.loc[daily_log_confirmed.Location == loc, last_day] + \ confirmed_delta * DECAY ** i<save_to_csv>
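The extrapolation loop above adds a geometrically decaying delta to the previous day's log1p value; a short sketch of the implied cumulative growth, using an illustrative delta alongside the DECAY constant from these cells:

import numpy as np

DECAY = 0.93          # same decay constant as in the cells above
delta = 0.11          # illustrative daily log-space delta (cf. GLOBAL_DELTA)
horizon = 30          # days to extrapolate

# each future day adds delta * DECAY**i to the log1p-scale series, as in the loop above
increments = delta * DECAY ** np.arange(horizon)
total = increments.sum()
print(total)          # total log1p-space increase over the horizon
print(np.exp(total))  # approximate multiplicative growth factor in raw counts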
input_layer = Input(shape=(1,30)) main_rnn_layer = LSTM(64, return_sequences=True, recurrent_dropout=0.2 )(input_layer) rnn = LSTM(32 )(main_rnn_layer) dense = Dense(128 )(rnn) dropout_c = Dropout(0.3 )(dense) classes = Dense(1, activation= LeakyReLU(alpha=0.1),name="class" )(dropout_c) model = Model(input_layer, classes) callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=4, verbose=1, factor=0.6), EarlyStopping(monitor='val_loss', patience=20), ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)] model.compile(loss=[tf.keras.losses.MeanSquaredLogarithmicError() ,tf.keras.losses.MeanSquaredLogarithmicError() ], optimizer="adam") model.summary() history = model.fit(X_train, y_train, epochs = 250, batch_size = 16, validation_data=(X_val, y_val), callbacks=callbacks)
Higgs Boson Machine Learning Challenge
10,784,932
train_clean['Geo latest = train_clean[train_clean.Date == TRAIN_END][[ 'Geo daily_death_deltas = train_clean[train_clean.Date >= '2020-03-17'].pivot( 'Geo daily_death_deltas = latest.merge(daily_death_deltas, on='Geo daily_death_deltas.shape daily_death_deltas.head() daily_death_deltas.to_csv('daily_death_deltas.csv', index=False )<feature_engineering>
model.load_weights("best_model.h5") test = X_test test = test.reshape(-1, 1,30) predictions = model.predict(test )
Higgs Boson Machine Learning Challenge
10,784,932
death_deltas = train.groupby(['Location', 'Country_Region', 'continent'])[[ 'Id']].count().reset_index() GLOBAL_DELTA = 0.11 death_deltas['DELTA'] = GLOBAL_DELTA death_deltas.loc[death_deltas.Country_Region=='China', 'DELTA'] = 0.005 death_deltas.loc[death_deltas.continent=='Oceania', 'DELTA'] = 0.08 death_deltas.loc[death_deltas.Country_Region=='Korea South', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Japan', 'DELTA'] = 0.04 death_deltas.loc[death_deltas.Country_Region=='Singapore', 'DELTA'] = 0.05 death_deltas.loc[death_deltas.Country_Region=='Taiwan*', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='US', 'DELTA'] = 0.17 death_deltas.loc[death_deltas.Country_Region=='Switzerland', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Norway', 'DELTA'] = 0.15 death_deltas.loc[death_deltas.Country_Region=='Iceland', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Country_Region=='Austria', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Italy', 'DELTA'] = 0.07 death_deltas.loc[death_deltas.Country_Region=='Spain', 'DELTA'] = 0.1 death_deltas.loc[death_deltas.Country_Region=='Portugal', 'DELTA'] = 0.13 death_deltas.loc[death_deltas.Country_Region=='Israel', 'DELTA'] = 0.16 death_deltas.loc[death_deltas.Country_Region=='Iran', 'DELTA'] = 0.06 death_deltas.loc[death_deltas.Country_Region=='Germany', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Malaysia', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Country_Region=='Russia', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Ukraine', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Brazil', 'DELTA'] = 0.2 death_deltas.loc[death_deltas.Country_Region=='Turkey', 'DELTA'] = 0.22 death_deltas.loc[death_deltas.Country_Region=='Philippines', 'DELTA'] = 0.12 death_deltas.loc[death_deltas.Location=='France-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='United Kingdom-', 'DELTA'] = 0.14 death_deltas.loc[death_deltas.Location=='Diamond Princess-', 'DELTA'] = 0.00 death_deltas.loc[death_deltas.Location=='China-Hong Kong', 'DELTA'] = 0.01 death_deltas.loc[death_deltas.Location=='San Marino-', 'DELTA'] = 0.05 death_deltas.shape death_deltas.DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA].shape death_deltas[death_deltas.DELTA != GLOBAL_DELTA].DELTA.mean() death_deltas[death_deltas.DELTA != GLOBAL_DELTA] death_deltas.describe()<save_to_csv>
sub = pd.read_csv('../input/higgs-boson/random_submission.zip')
Higgs Boson Machine Learning Challenge
10,784,932
daily_log_deaths = train_clean.pivot('Location', 'Date', 'LogFatalities').reset_index()
daily_log_deaths = daily_log_deaths.sort_values(TRAIN_END, ascending=False)
daily_log_deaths.to_csv('daily_log_deaths.csv', index=False)

for i, d in tqdm(enumerate(pd.date_range(add_days(TRAIN_END, 1), add_days(TEST_END, 1)))):
    new_day = str(d).split(' ')[0]
    last_day = dt.datetime.strptime(new_day, DATEFORMAT) - dt.timedelta(days=1)
    last_day = last_day.strftime(DATEFORMAT)
    for loc in death_deltas.Location:
        death_delta = death_deltas.loc[death_deltas.Location == loc, 'DELTA'].values[0]
        daily_log_deaths.loc[daily_log_deaths.Location == loc, new_day] = \
            daily_log_deaths.loc[daily_log_deaths.Location == loc, last_day] + death_delta * DECAY ** i<feature_engineering>
pred = np.where(predictions > 0.5, 1, 0)
pred
Higgs Boson Machine Learning Challenge
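The pivot-table loop in the COVID-19 prompt cell above grows each location's log fatality count by a per-location delta that is shrunk geometrically by DECAY at every forecast day. A minimal stand-alone sketch of that recurrence with made-up values; the kernel converts the log values back to counts with its own to_exp helper, shown in a later cell:

DECAY = 0.93      # assumed daily shrink factor for the growth delta (illustrative value)
delta = 0.11      # assumed initial daily increment in log space (illustrative value)
log_y = 2.0       # log of the last observed cumulative count

log_forecast = []
for i in range(30):                        # 30-day horizon
    log_y = log_y + delta * DECAY ** i     # same update as the loop above
    log_forecast.append(log_y)

print([round(v, 3) for v in log_forecast[:5]])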
10,784,932
confirmed = []
fatalities = []
for id, d, loc in tqdm(test[['ForecastId', 'Date', 'Location']].values):
    c = to_exp(daily_log_confirmed.loc[daily_log_confirmed.Location == loc, d].values[0])
    f = to_exp(daily_log_deaths.loc[daily_log_deaths.Location == loc, d].values[0])
    confirmed.append(c)
    fatalities.append(f)<prepare_output>
test_predict = pd.DataFrame({"EventId": sub['EventId'], "RankOrder": sub['RankOrder'], "Class": test_predict})
test_predict
Higgs Boson Machine Learning Challenge
10,784,932
my_submission = test.copy()
my_submission['ConfirmedCases'] = confirmed
my_submission['Fatalities'] = fatalities
my_submission.shape
my_submission.head()<save_to_csv>
test_predict = test_predict.replace(1, 's')
test_predict = test_predict.replace(0, 'b')
test_predict
Higgs Boson Machine Learning Challenge
10,784,932
my_submission[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False)
print(DECAY)
my_submission.head()
my_submission.tail()
my_submission.shape<train_model>
# note: 'Class' now holds the string labels 's'/'b', so this ranks lexicographically;
# ranking the raw prediction scores (before the label replacement) would be more informative
test_predict['RankOrder'] = test_predict['Class'].argsort().argsort() + 1
Higgs Boson Machine Learning Challenge
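The RankOrder cell above relies on the double-argsort idiom: argsort gives the indices that would sort the values, and argsort of that result gives each element's position in the sorted order, i.e. its rank. A small self-contained illustration with toy scores (not taken from the kernel):

import numpy as np

scores = np.array([0.2, 0.9, 0.5, 0.1])

order = scores.argsort()      # indices that would sort the array: [3, 0, 2, 1]
ranks = order.argsort() + 1   # each element's 1-based position in that ordering

print(ranks)                  # [2 4 3 1]: the smallest score gets rank 1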
10,784,932
<set_options><EOS>
test_predict.to_csv("submission.csv",index=False )
Higgs Boson Machine Learning Challenge
171,635
<SOS> metric: AUC Kaggle data source: west-nile-virus-prediction<load_from_csv>
import pandas as pd
import numpy as np
import math
import scipy.stats as sps
from time import time
from sklearn import preprocessing, ensemble, metrics, feature_selection, model_selection, pipeline
import xgboost as xgb
from IPython.display import display
from matplotlib import pyplot
West Nile Virus Prediction
171,635
dftrain = pd.read_csv('../input/covid19-global-forecasting-week-2/train.csv', parse_dates=['Date']).sort_values(by=['Country_Region', 'Date']).fillna('None')
dftest = pd.read_csv('../input/covid19-global-forecasting-week-2/test.csv', parse_dates=['Date']).sort_values(by=['Country_Region', 'Date']).fillna('None')
dfsubm = pd.read_csv('../input/covid19-global-forecasting-week-2/submission.csv')<load_from_csv>
dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')
dtype_map_weather = dict(Station='str')
dtype_map_test_train = dict(Block='str', Street='str')

test = pd.read_csv('../input/test.csv', parse_dates=['Date'], date_parser=dateparse, dtype=dtype_map_test_train)
train = pd.read_csv('../input/train.csv', parse_dates=['Date'], date_parser=dateparse, dtype=dtype_map_test_train)
weather = pd.read_csv('../input/weather.csv', parse_dates=['Date'], date_parser=dateparse, dtype=dtype_map_weather)
sample_sub = pd.read_csv('../input/sampleSubmission.csv')
West Nile Virus Prediction
171,635
confirmed = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv').sort_values(by='Country/Region')
deaths = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
recovered = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')<feature_engineering>
weather_exclude = ['Dewpoint', 'WetBulb', 'CodeSum', 'Depth', 'Water1', 'SnowFall', 'StnPressure', 'SeaLevel', 'ResultSpeed', 'ResultDir', 'AvgSpeed', 'DewPoint']
weather_cols = [col for col in weather.columns if col not in weather_exclude]
weather = weather[weather_cols]

train_exclude = ['Address', 'AddressNumberAndStreet', 'AddressAccuracy', 'NumMosquitos']
train_cols = [col for col in train.columns if col not in train_exclude]
train = train[train_cols]

test_exclude = ['Address', 'AddressNumberAndStreet', 'AddressAccuracy', 'Id']
test_cols = [col for col in test.columns if col not in test_exclude]
test = test[test_cols]
West Nile Virus Prediction
171,635
confirmed['Country_Region'] = confirmed['Country/Region']
confirmed['Province_State'] = confirmed['Province/State']
confirmed.head()<merge>
miss_weather = ['M', '-']
trace_weather = ['T']
West Nile Virus Prediction
171,635
dftrain = dftrain.join(confirmed[['Country_Region', 'Province_State', 'Lat', 'Long']].set_index(['Province_State', 'Country_Region']), on=['Province_State', 'Country_Region'] )<feature_engineering>
cols_not_date = [col for col in weather.columns if col != 'Date']
West Nile Virus Prediction
171,635
dftrain['Dayofyear'] = dftrain['Date'].dt.dayofyear
dftest['Dayofyear'] = dftest['Date'].dt.dayofyear<data_type_conversions>
weather[cols_not_date].apply(pd.value_counts, axis=1)[miss_weather + trace_weather].fillna(0 ).sum()
West Nile Virus Prediction
171,635
def transpose_df(df):
    df = df.drop(['Lat', 'Long'], axis=1).groupby('Country/Region').sum().T
    df.index = pd.to_datetime(df.index)
    return df<feature_engineering>
check = weather[cols_not_date].apply(pd.value_counts, axis=0 ).fillna(0) check.loc[['M', '-', 'T']]
West Nile Virus Prediction
171,635
confirmedT = transpose_df(confirmed)
deathsT = transpose_df(deaths)
recoveredT = transpose_df(recovered)
mortalityT = deathsT/confirmedT<feature_engineering>
check_stat1 = weather[cols_not_date][weather.Station == '1'].apply(pd.value_counts, axis=0 ).fillna(0) check_stat1.loc[['M', '-', 'T']]
West Nile Virus Prediction
171,635
def add_day(df):
    df['Date'] = df.index
    df['Dayofyear'] = df['Date'].dt.dayofyear
    return df<drop_column>
check_stat2 = weather[cols_not_date][weather.Station == '2'].apply(pd.value_counts, axis=0 ).fillna(0) check_stat2.loc[['M', '-', 'T']]
West Nile Virus Prediction
171,635
confirmedT, deathsT, recoveredT, mortalityT = add_day(confirmedT), add_day(deathsT), add_day(recoveredT), add_day(mortalityT )<sort_values>
check.loc[['M', '-', 'T']]/(len(weather)) * 100
West Nile Virus Prediction
171,635
allcountries_ordered = confirmed.set_index(['Country_Region'] ).iloc[:,-2].sort_values(ascending=False ).index.tolist()<save_to_csv>
check_stat1.loc[['M', '-', 'T']]/(len(weather)) * 100
West Nile Virus Prediction
171,635
confirmed.set_index(['Country_Region'] ).iloc[:,-2].sort_values(ascending=False ).to_csv('confirmed_countries.csv' )<feature_engineering>
check_stat2.loc[['M', '-', 'T']]/(len(weather)) * 100
West Nile Virus Prediction
171,635
def df_day1(df, confirmed):
    df_day1 = pd.DataFrame({'Days since 100 cases': np.arange(1000)}).set_index('Days since 100 cases')
    countries_df = df.columns.tolist()[:-2]
    countries_conf = confirmed.columns.tolist()[:-2]
    for ic, country in enumerate(countries_df):
        for ic2, country2 in enumerate(countries_conf):
            if country == country2:
                dfsub = df[confirmed[country] > 100.][country]
                df_day1[country] = np.nan
                df_day1.loc[:len(dfsub)-1, country] = dfsub.tolist()
    df_day1 = df_day1.dropna(how='all')
    return df_day1<define_variables>
weather = weather.replace('M', np.NaN)
weather = weather.replace('-', np.NaN)
weather = weather.replace('T', 0.005)

weather.Tmax = weather.Tmax.fillna(method='ffill')
weather.Tmin = weather.Tmin.fillna(method='ffill')
weather.Depart = weather.Depart.fillna(method='ffill')
weather.Heat = weather.Heat.fillna(method='ffill')
weather.Cool = weather.Cool.fillna(method='ffill')
weather.PrecipTotal = weather.PrecipTotal.fillna(method='ffill')
West Nile Virus Prediction
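The cleaning cell above maps the NOAA weather placeholders ('M' missing, '-' not reported, 'T' trace precipitation) to numbers before forward-filling. A compact sketch of the same idea using pd.to_numeric with coercion; the toy frame and column values are illustrative, not the real weather schema:

import pandas as pd

w = pd.DataFrame({"Tmax": ["88", "M", "90"], "PrecipTotal": ["0.12", "T", "-"]})

w = w.replace({"T": 0.005})                          # trace precipitation -> small constant
for col in w.columns:
    w[col] = pd.to_numeric(w[col], errors="coerce")  # 'M' and '-' become NaN
    w[col] = w[col].ffill()                          # forward-fill the remaining gaps

print(w)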
171,635
confirmed_day1 = df_day1(confirmedT, confirmedT)
deaths_day1 = df_day1(deathsT, confirmedT)
recovered_day1 = df_day1(recoveredT, confirmedT)
mortality_day1 = df_day1(mortalityT, confirmedT)
confirmednorm_day1 = confirmed_day1/confirmed_day1.loc[0, :]
maxday = confirmed_day1.shape[0]<data_type_conversions>
to_numeric = ['Tmax', 'Tmin', 'Tavg', 'Depart', 'Heat', 'Cool', 'PrecipTotal']
for col in to_numeric:
    weather[col] = pd.to_numeric(weather[col])
West Nile Virus Prediction
171,635
date_day1 = confirmedT.copy()
for column in date_day1:
    date_day1[column] = confirmedT.index.tolist()
date_day1 = df_day1(date_day1, confirmedT)<compute_train_metric>
weather.Sunrise = weather.Sunrise.fillna(method = 'ffill') weather.Sunset = weather.Sunset.fillna(method = 'ffill' )
West Nile Virus Prediction
171,635
def logistic_curve(x, k, x_0, ymax):
    return ymax / (1 + np.exp(-k * (x - x_0)))<compute_train_metric>
counter = 0
tracker = []
for index, val in enumerate(weather.Sunset):
    try:
        pd.to_datetime(val, format='%H%M').time()
    except:
        counter += 1
        tracker.append((index, val, val[2:], counter))
print(tracker[-1])
West Nile Virus Prediction
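logistic_curve in the prompt cell above is the three-parameter sigmoid later fitted to cumulative case counts. A minimal sketch of fitting it with scipy's curve_fit on synthetic data; the data and initial guess are invented for illustration, in the spirit of the fitting loop shown further down:

import numpy as np
from scipy.optimize import curve_fit

def logistic_curve(x, k, x_0, ymax):
    return ymax / (1 + np.exp(-k * (x - x_0)))

x = np.arange(60)
y = logistic_curve(x, 0.3, 30, 1000) + np.random.normal(0, 5, size=x.size)  # noisy synthetic curve

p0 = [0.1, x[-1], y.max()]   # rough initial guess: slope, midpoint, plateau
popt, pcov = curve_fit(logistic_curve, x, y, p0=p0, maxfev=10000)
print("fitted k, x_0, ymax:", popt)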
171,635
def logistic_curve2(x, k1, k2, x_0, ymax):
    return ymax / (1 + np.exp(-k1 * (x - x_0)) + np.exp(-k2 * (x - x_0)))<data_type_conversions>
time_func = lambda x: pd.Timestamp(pd.to_datetime(x, format = '%H%M'))
West Nile Virus Prediction
171,635
list_countries = dftrain[dftrain['Date'] == '2020-01-22']['Country_Region'].tolist() list_states = dftrain[dftrain['Date'] == '2020-01-22']['Province_State'].tolist() datenow = datetime.now()<data_type_conversions>
weather.Sunrise = weather.Sunrise.apply(time_func )
West Nile Virus Prediction
171,635
list_date_pand = []
list_maxcases = []
list_maxfat = []
for country, state in list(zip(list_countries, list_states)):
    df2 = dftrain.loc[(dftrain['Country_Region'] == country) & (dftrain['Province_State'] == state)].fillna('None')
    maxcases, maxfat = df2['ConfirmedCases'].max(), df2['Fatalities'].max()
    date_pand2 = []
    date_pand = df2[df2['ConfirmedCases'] > 100.]['Date'].tolist()
    try:
        list_date_pand.append(pd.to_datetime(date_pand[0]))
    except:
        list_date_pand.append(pd.to_datetime(datenow))
    list_maxcases.append(maxcases)
    list_maxfat.append(maxfat)

dfstartpand = pd.DataFrame(np.array([list_countries, list_states, list_date_pand, list_maxcases, list_maxfat]).T,
                           columns=['Country_Region', 'Province_State', 'Start_Pandemic', 'ConfirmedCases', 'Fatalities'])
dfstartpand['Start_Pandemic'] = dfstartpand['Start_Pandemic'].dt.date<sort_values>
weather.Sunset = weather.Sunset.apply(time_func )
West Nile Virus Prediction
171,635
dfstartpand_ordered = dfstartpand.sort_values(by=['Start_Pandemic', 'ConfirmedCases', 'Fatalities'], ascending=[True, False, False])
country_state_ordered = list(zip(dfstartpand_ordered['Country_Region'].tolist(), dfstartpand_ordered['Province_State']))
datetrain = dftrain['Date'].unique()
datetest = dftest['Date'].unique()<define_variables>
minutes=(weather.Sunset - weather.Sunrise ).astype('timedelta64[m]' )
West Nile Virus Prediction
171,635
dftest['ConfirmedCases_logreg'] = 0.0 ; dftrain['ConfirmedCases_logreg'] = 0.0 dftest['Fatalities_logreg'] = 0.0 ; dftrain['Fatalities_logreg'] = 0.0 p0 = 1 for country, state in country_state_ordered: masktrain =(dftrain['Country_Region'] == country)&(dftrain['Province_State'] == state) masktrain2 =(dftrain['Country_Region'] == country)&(dftrain['Province_State'] == state)&(dftrain['Date'] <= '2020-03-31')&(dftrain['Date'] >= starttest) masktest =(dftest['Country_Region'] == country)&(dftest['Province_State'] == state) masktest2 =(dftest['Country_Region'] == country)&(dftest['Province_State'] == state)&(dftest['Date'] <= '2020-03-31') df2plot = dftrain[masktrain].set_index('Date') X = np.arange(len(df2plot)) X_test =(np.timedelta64(datetest[0]-datetrain[0], 'D')).astype(float)+np.arange(0,len(datetest)) try: y = df2plot['ConfirmedCases'] p0_cases = [1/(len(X)/2.) , X[-1], y.max() ] popt, pcov = curve_fit(logistic_curve, X, y, p0=p0_cases,bounds=([0,0,0],np.inf), maxfev=1000) k_cases, x_0_cases, ymax_cases = popt cases_train_fc = pd.Series(logistic_curve(X, k_cases, x_0_cases, ymax_cases),index=df2plot.index) cases_test_fc = pd.Series(logistic_curve(X_test, k_cases, x_0_cases, ymax_cases),index=datetest) dftest.loc[masktest,'ConfirmedCases_logreg'] = cases_test_fc.tolist() dftrain.loc[masktrain,'ConfirmedCases_logreg'] = cases_train_fc.tolist() except: print(country+' '+state+' Unable to fit the confirmed cases') dftest.loc[masktest,'ConfirmedCases_logreg'] = dftrain.loc[masktrain,'ConfirmedCases'].iloc[-1] dftrain.loc[masktrain,'ConfirmedCases_logreg'] = dftrain.loc[masktrain,'ConfirmedCases'] try: y = df2plot['Fatalities'] p0_deaths = [1/(len(X)/2.) , X[-1], y.max() ] popt, pcov = curve_fit(logistic_curve, X, y, p0=p0_deaths,bounds=([0,0,0],np.inf), maxfev=1000) k_deaths, x_0_deaths, ymax_deaths = popt deaths_train_fc = pd.Series(logistic_curve(X, k_deaths, x_0_deaths, ymax_deaths),index=datetrain) deaths_test_fc = pd.Series(logistic_curve(X_test, k_deaths, x_0_deaths, ymax_deaths),index=datetest) dftest.loc[masktest,'Fatalities_logreg'] = deaths_test_fc.tolist() dftrain.loc[masktrain,'Fatalities_logreg'] = deaths_train_fc.tolist() except: print(country+' '+state+' Unable to fit the fatalities') dftest.loc[masktest,'Fatalities_logreg'] = dftrain.loc[masktrain,'Fatalities'].iloc[-1] dftrain.loc[masktrain,'Fatalities_logreg'] = dftrain.loc[masktrain,'Fatalities'] dftest.loc[masktest2,'ConfirmedCases_logreg'] = dftrain.loc[masktrain2,'ConfirmedCases'].tolist() dftest.loc[masktest2,'Fatalities_logreg'] = dftrain.loc[masktrain2,'Fatalities'].tolist()<data_type_conversions>
hours = minutes/60
West Nile Virus Prediction
171,635
dfsubm['ConfirmedCases'] = dftest['ConfirmedCases_logreg'] dfsubm['Fatalities'] = dftest['Fatalities_logreg']<save_to_csv>
weather['DayLength_MPrec'] =(weather.Sunset - weather.Sunrise ).astype('timedelta64[m]')/60
West Nile Virus Prediction
171,635
dfsubm.to_csv('submission.csv', index=False )<define_variables>
weather['DayLength_NearH'] = np.round(((weather.Sunset - weather.Sunrise ).astype('timedelta64[m]')/60 ).values )
West Nile Virus Prediction
171,635
DATASET_DIR = "../input/covid19-global-forecasting-week-2"
TRAIN_FILE = DATASET_DIR + "/train.csv"
TEST_FILE = DATASET_DIR + "/test.csv"<set_options>
weather['NightLength_MPrec']= 24.0 - weather.DayLength_MPrec
West Nile Virus Prediction
171,635
%matplotlib inline <load_from_csv>
weather['NightLength_NearH']= 24.0 - weather.DayLength_NearH
West Nile Virus Prediction
171,635
df_train = pd.read_csv(TRAIN_FILE) df_train.head()<load_from_csv>
hours_RiseSet_func = lambda x: x.minute/60.0 + float(x.hour )
West Nile Virus Prediction
171,635
df_test = pd.read_csv(TEST_FILE) df_test.head(5 )<count_missing_values>
weather['Sunrise_hours'] = weather.Sunrise.apply(hours_RiseSet_func )
West Nile Virus Prediction
171,635
df_train["Province_State"].isnull().sum()<count_missing_values>
weather['Sunset_hours'] = weather.Sunset.apply(hours_RiseSet_func )
West Nile Virus Prediction
171,635
df_train.isnull().sum()<filter>
mean_func = lambda x: x.mean() blend_cols = ['Tmax', 'Tmin', 'Depart' ,'Heat', 'Cool', 'PrecipTotal']
West Nile Virus Prediction
171,635
df_train[df_train["Province_State"].notnull() ]<data_type_conversions>
blended_cols= ['blended_' + col for col in blend_cols]
West Nile Virus Prediction
171,635
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True) df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True )<data_type_conversions>
station_1 = weather[blend_cols][weather.Station == '1'] station_2 = weather[blend_cols][weather.Station == '2']
West Nile Virus Prediction
171,635
pd.plotting.register_matplotlib_converters()
grouped_data = df_train.groupby('Date')['Date', 'ConfirmedCases', 'Fatalities'].sum().reset_index()
grouped_data = grouped_data.sort_values(by=['Date'], ascending=True)
grouped_data['ConfirmedCases'] = grouped_data['ConfirmedCases'].astype(int)
grouped_data['Fatalities'] = grouped_data['Fatalities'].astype(int)
grouped_data.head()<data_type_conversions>
station_blend = pd.DataFrame(( station_1.values + station_2.values)/2, columns= blended_cols )
West Nile Virus Prediction
171,635
df_train['Date'] = pd.to_datetime(df_train['Date']) grouped_data = df_train.groupby(['Date'],as_index=True ).agg({'ConfirmedCases': 'max','Fatalities': 'max'}) grouped_data['ConfirmedCases'] = grouped_data['ConfirmedCases'].astype(int) grouped_data['Fatalities'] = grouped_data['Fatalities'].astype(int) display(grouped_data.head() )<data_type_conversions>
extract_2 = weather[weather.Station == '2'].reset_index(drop = True) extract_2.head()
West Nile Virus Prediction
171,635
grouped_data = df_train.groupby('Country_Region')['ConfirmedCases', 'Fatalities'].sum().reset_index() grouped_data = grouped_data.sort_values(by=['ConfirmedCases'], ascending=False) grouped_data['ConfirmedCases'] = grouped_data['ConfirmedCases'].astype(int) grouped_data['Fatalities'] = grouped_data['Fatalities'].astype(int) grouped_data.head(10 )<sort_values>
extract_1 = weather[weather.Station == '1'].reset_index(drop = True) extract_1.head()
West Nile Virus Prediction
171,635
grouped_data = df_train.groupby('Country_Region')['ConfirmedCases', 'Fatalities'].sum().reset_index() grouped_data_sort_confirmed_cases = grouped_data.sort_values(by=['ConfirmedCases', 'Fatalities'], ascending=False)[:10] grouped_data_sort_fatalities = grouped_data.sort_values(by=['Fatalities', 'ConfirmedCases'], ascending=False)[:10]<data_type_conversions>
joined_1 = extract_1.join(station_blend) joined_2 = extract_2.join(station_blend )
West Nile Virus Prediction
171,635
train_data = df_train.copy() test_data = df_test.copy() train_data['Date'] = train_data['Date'].dt.strftime("%m%d" ).astype(int) test_data['Date'] = test_data['Date'].dt.strftime("%m%d" ).astype(int) <data_type_conversions>
weather_blend = pd.concat([joined_1, joined_2] )
West Nile Virus Prediction
171,635
train_data['Province_State'] = train_data['Province_State'].fillna("N/D") test_data['Province_State'] = test_data['Province_State'].fillna("N/D" )<categorify>
month_func = lambda x: x.month
day_func = lambda x: x.day
day_of_year_func = lambda x: x.dayofyear
week_of_year_func = lambda x: x.week

train['month'] = train.Date.apply(month_func)
train['day'] = train.Date.apply(day_func)
train['day_of_year'] = train.Date.apply(day_of_year_func)
train['week'] = train.Date.apply(week_of_year_func)

test['month'] = test.Date.apply(month_func)
test['day'] = test.Date.apply(day_func)
test['day_of_year'] = test.Date.apply(day_of_year_func)
test['week'] = test.Date.apply(week_of_year_func)
West Nile Virus Prediction
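The feature cell above extracts calendar features by applying Python lambdas row by row. With recent pandas the same features come out of the vectorized .dt accessor, which is usually faster; a toy sketch (illustrative frame only):

import pandas as pd

df = pd.DataFrame({"Date": pd.to_datetime(["2013-06-01", "2013-08-15"])})

df["month"] = df["Date"].dt.month
df["day"] = df["Date"].dt.day
df["day_of_year"] = df["Date"].dt.dayofyear
df["week"] = df["Date"].dt.isocalendar().week   # .dt.week is deprecated in newer pandas

print(df)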
171,635
LE = LabelEncoder()
train_data['Province_State'] = LE.fit_transform(train_data['Province_State'])
test_data['Province_State'] = LE.transform(test_data['Province_State'])
train_data['Country_Region'] = LE.fit_transform(train_data['Country_Region'])
test_data['Country_Region'] = LE.transform(test_data['Country_Region'])<prepare_x_and_y>
weather_blend = weather_blend.drop(['Sunrise', 'Sunset'], axis= 1 )
West Nile Virus Prediction
171,635
x_cols = ['Date','Province_State', 'Country_Region'] y_cols = ['ConfirmedCases', 'Fatalities'] train_data[x_cols].head(10 )<train_model>
train = train.merge(weather_blend, on='Date') test = test.merge(weather_blend, on='Date' )
West Nile Virus Prediction
171,635
model_dtr = DecisionTreeRegressor(max_depth=None)

param_grid = {
    'n_estimators': [100, 200, 300],
    'learning_rate': [0.1, 0.01, 0.001]
}

model_abr = AdaBoostRegressor(base_estimator=model_dtr)

# the grid has only 9 combinations, so with n_iter=100 the randomized search
# degenerates to an exhaustive grid search (recent sklearn caps n_iter with a warning)
model = RandomizedSearchCV(estimator=model_abr, param_distributions=param_grid,
                           n_iter=100, cv=10, verbose=0, random_state=2020, n_jobs=-1)
model.fit(train_data[x_cols], train_data[y_cols[0]])
predictions1 = model.predict(test_data[x_cols])

model = RandomizedSearchCV(estimator=model_abr, param_distributions=param_grid,
                           n_iter=100, cv=10, verbose=0, random_state=2020, n_jobs=-1)
model.fit(train_data[x_cols], train_data[y_cols[1]])
predictions2 = model.predict(test_data[x_cols])<create_dataframe>
cols_to_write = [col for col in train.columns if col != 'Date']
West Nile Virus Prediction
171,635
submission = pd.DataFrame({'ForecastId': test_data['ForecastId'], 'ConfirmedCases': predictions1, 'Fatalities': predictions2})
submission['ConfirmedCases'] = submission['ConfirmedCases'].astype(int)
submission['Fatalities'] = submission['Fatalities'].astype(int)
submission.head(50)<save_to_csv>
train_station_1= train[train.Station == '1'] train_station_2= train[train.Station == '2'] test_station_1= test[test.Station == '1'] test_station_2= test[test.Station == '2']
West Nile Virus Prediction
171,635
filename = 'submission.csv' submission.to_csv(filename,index=False) print('Saved file: ' + filename )<compute_test_metric>
train.to_csv('train.csv' )
West Nile Virus Prediction
171,635
%matplotlib inline def sigmoid_sqrt_func(x, a, b, c, d, e): return c + d /(1.0 + np.exp(-a*x+b)) + e*x**0.5 def sigmoid_linear_func(x, a, b, c, d, e): return c + d /(1.0 + np.exp(-a*x+b)) + e*0.1*x def sigmoid_quad_func(x, a, b, c, d, e, f): return c + d /(1.0 + np.exp(-a*x+b)) + e*0.1*x + f*0.001*x*x def sigmoid_func(x, a, b, c, d): return c + d /(1.0 + np.exp(-a*x+b)) def exp_func(x, a, b, c, d): return c + d * np.exp(a*x+b) def func_fitting(y, func=sigmoid_func, x_scale=50.0, y_scale=10000.0, start_pred=8, AN=0, MAXN=60, PN=15, b=5): x = range(len(y)) x_real = np.array(x)/x_scale y_real = np.array(y)/y_scale x_train = x_real y_train = y_real def next_day_pred(AN, BN): x_train = x_real[AN:BN] y_train = y_real[AN:BN] popt, pcov = curve_fit(func, x_train, y_train, method='trf', maxfev=20000, p0=(1, 0, 0, 1), bounds=[(-b, -np.inf, -np.inf, -b),(b, np.inf, np.inf, b)], ) x_pred = np.array(range(MAXN)) /x_scale y_pred = func(x_pred, *popt) return x_pred, y_pred NP = start_pred y_pred = [np.nan]*NP y_pred_list = [] for BN in range(NP, len(y_real)) : x_pred, y_pred_ = next_day_pred(BN-PN, BN) y_pred.append(y_pred_[BN]) y_pred_list.append(y_pred_) for BN in range(len(y_real), len(y_pred_)) : y_pred.append(y_pred_[BN]) y_pred = np.array(y_pred) y_pred_list = np.array(y_pred_list) y_pred_std = np.std(y_pred_list[-2:], axis=0) return x_real*x_scale, y_real*y_scale, x_train*x_scale, y_train*y_scale, \ x_pred*x_scale, y_pred*y_scale, y_pred_std*y_scale def draw_figure(start_date, title, x_real, y_real, x_train, y_train, x_pred, y_pred, y_pred_std): def to_date(idx): idx = np.round(idx) return datetime.datetime.strptime(start_date, '%m/%d/%Y' ).date() + datetime.timedelta(days=idx) fig, ax1 = plt.subplots(figsize=[14, 7]) plot1 = ax1.plot(list(map(to_date, x_real)) , y_real, 'gs',label='original') plot2 = ax1.plot(list(map(to_date, x_pred)) , y_pred, 'r',label='predict') plot3 = ax1.fill_between(list(map(to_date, x_pred)) , np.maximum(0,(y_pred-y_pred_std)) , (y_pred+y_pred_std), alpha=0.2, edgecolor=' plot0 = ax1.plot(list(map(to_date, x_train)) , y_train, 'y.',label='history') ax2=ax1.twinx() ax2.plot(list(map(to_date, x_real)) [1:],(y_real[1:]-y_real[:-1]), '-s',label='original add') ax2.plot(list(map(to_date, x_pred)) [1:],(y_pred[1:]-y_pred[:-1]), '-',label='pred add') plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m-%d')) plt.gca().xaxis.set_major_locator(mdates.DayLocator()) plt.gcf().autofmt_xdate() plt.xlabel('x') plt.ylabel('y') fig.legend(loc=2) plt.title(title) plt.savefig('{}.pdf'.format(title)) plt.show() date = list(map(to_date, x_pred)) pred = y_pred real = y_real for i in range(len(pred)) : if i < len(real): print('{}\t{:.0f}\t{:.0f}\t{:.3f}'.format(date[i], real[i], pred[i], np.abs(pred[i]-real[i])/real[i]*100)) else: print('{}\t-\t{:.0f}'.format(date[i], pred[i])) return pred <load_from_csv>
keep_cols = ['Date', u'Tmax', u'Tmin', u'Tavg',u'PrecipTotal'] train_station_2 = train_station_2[keep_cols] test_station_2 = test_station_2[keep_cols] prefix_s2 = 'stat_2_' rename_cols_s2 = [prefix_s2 + col for col in train_station_2.columns] train_station_2.columns = rename_cols_s2 test_station_2.columns = rename_cols_s2
West Nile Virus Prediction
171,635
train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv') test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv') pred_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/submission.csv') train_data = train_data.fillna(value='NULL') test_data = test_data.fillna(value='NULL' )<count_unique_values>
drop_cols = ['Heat', 'Cool', 'Depart', 'NightLength_MPrec', 'NightLength_NearH', 'blended_Depart', 'blended_Heat', 'blended_Cool'] train_station_1 = train_station_1.drop(drop_cols, axis= 1) test_station_1 = test_station_1.drop(drop_cols, axis= 1 )
West Nile Virus Prediction
171,635
train_date_list = train_data.iloc[:, 3].unique() print(len(train_date_list)) print(train_date_list) test_date_list = test_data.iloc[:, 3].unique() print(len(test_date_list)) print(test_date_list) len(train_data.groupby(['Province_State', 'Country_Region'])) len(test_data.groupby(['Province_State', 'Country_Region']))<load_from_csv>
prefix_s1 = 'stat_1_' rename_cols_s1 = [prefix_s1 + col for col in keep_cols] cols_to_rename= [col for col in train_station_1.columns if col in keep_cols] s1_name_map = dict(zip(cols_to_rename, rename_cols_s1)) train_station_1 = train_station_1.rename(columns= s1_name_map) test_station_1 = test_station_1.rename(columns= s1_name_map )
West Nile Virus Prediction