import os
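#Hugging Face access token (read from the environment) used to access the private dataset repository.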
HF_TOKEN = os.getenv("HF_TOKEN")
import numpy as np
import pandas as pd
import sklearn
import sklearn.metrics
from math import sqrt
from scipy import stats as st
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
import shap
import gradio as gr
import random
import re
import textwrap
from datasets import load_dataset
#Read training data.
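#The first column of each CSV is dropped below (assumed to be a redundant row index).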
g2_x1 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_12m_data_train.csv", use_auth_token = HF_TOKEN)
g2_x1 = pd.DataFrame(g2_x1['train'])
g2_x1 = g2_x1.iloc[:, 1:]
g2_x2 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_24m_data_train.csv", use_auth_token = HF_TOKEN)
g2_x2 = pd.DataFrame(g2_x2['train'])
g2_x2 = g2_x2.iloc[:, 1:]
g2_x3 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_36m_data_train.csv", use_auth_token = HF_TOKEN)
g2_x3 = pd.DataFrame(g2_x3['train'])
g2_x3 = g2_x3.iloc[:, 1:]
g2_x4 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_60m_data_train.csv", use_auth_token = HF_TOKEN)
g2_x4 = pd.DataFrame(g2_x4['train'])
g2_x4 = g2_x4.iloc[:, 1:]
#Read validation data.
g2_x1_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_12m_data_valid.csv", use_auth_token = HF_TOKEN)
g2_x1_valid = pd.DataFrame(g2_x1_valid['train'])
g2_x1_valid = g2_x1_valid.iloc[:, 1:]
g2_x2_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_24m_data_valid.csv", use_auth_token = HF_TOKEN)
g2_x2_valid = pd.DataFrame(g2_x2_valid['train'])
g2_x2_valid = g2_x2_valid.iloc[:, 1:]
g2_x3_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_36m_data_valid.csv", use_auth_token = HF_TOKEN)
g2_x3_valid = pd.DataFrame(g2_x3_valid['train'])
g2_x3_valid = g2_x3_valid.iloc[:, 1:]
g2_x4_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g2_60m_data_valid.csv", use_auth_token = HF_TOKEN)
g2_x4_valid = pd.DataFrame(g2_x4_valid['train'])
g2_x4_valid = g2_x4_valid.iloc[:, 1:]
#Define feature names (g2).
g2_f1_names = list(g2_x1.columns)
g2_f1_names = [f1.replace('__', ' - ') for f1 in g2_f1_names]
g2_f1_names = [f1.replace('_', ' ') for f1 in g2_f1_names]
g2_f2_names = list(g2_x2.columns)
g2_f2_names = [f2.replace('__', ' - ') for f2 in g2_f2_names]
g2_f2_names = [f2.replace('_', ' ') for f2 in g2_f2_names]
g2_f3_names = list(g2_x3.columns)
g2_f3_names = [f3.replace('__', ' - ') for f3 in g2_f3_names]
g2_f3_names = [f3.replace('_', ' ') for f3 in g2_f3_names]
g2_f4_names = list(g2_x4.columns)
g2_f4_names = [f4.replace('__', ' - ') for f4 in g2_f4_names]
g2_f4_names = [f4.replace('_', ' ') for f4 in g2_f4_names]
#Prepare data for outcome 1 (g2).
g2_y1 = g2_x1.pop('OUTCOME')
g2_y1_valid = g2_x1_valid.pop('OUTCOME')
#Prepare data for outcome 2 (g2).
g2_y2 = g2_x2.pop('OUTCOME')
g2_y2_valid = g2_x2_valid.pop('OUTCOME')
#Prepare data for outcome 3 (g2).
g2_y3 = g2_x3.pop('OUTCOME')
g2_y3_valid = g2_x3_valid.pop('OUTCOME')
#Prepare data for outcome 4 (g2).
g2_y4 = g2_x4.pop('OUTCOME')
g2_y4_valid = g2_x4_valid.pop('OUTCOME')
#Assign hyperparameters (g2).
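#These values are hard-coded; their precision suggests they come from a prior automated hyperparameter search.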
g2_y1_params = {'criterion': 'gini', 'max_features': 'log2', 'max_depth': 4, 'n_estimators': 900, 'min_samples_leaf': 3, 'min_samples_split': 3, 'random_state': 31}
g2_y2_params = {'objective': 'binary', 'boosting_type': 'gbdt', 'lambda_l1': 6.121600211393574e-07, 'lambda_l2': 0.0028998418721597743, 'num_leaves': 2, 'feature_fraction': 0.6798107660641116, 'bagging_fraction': 0.42950125330169564, 'bagging_freq': 7, 'min_child_samples': 87, 'metric': 'binary_logloss', 'verbosity': -1, 'random_state': 31}
g2_y3_params = {'objective': 'binary', 'boosting_type': 'gbdt', 'lambda_l1': 6.806711570427862e-07, 'lambda_l2': 0.003064768550805565, 'num_leaves': 2, 'feature_fraction': 0.6780931825384188, 'bagging_fraction': 0.5046150209604292, 'bagging_freq': 3, 'min_child_samples': 69, 'metric': 'binary_logloss', 'verbosity': -1, 'random_state': 31}
g2_y4_params = {'criterion': 'gini', 'max_features': 'log2', 'max_depth': 5, 'n_estimators': 800, 'min_samples_leaf': 3, 'min_samples_split': 2, 'random_state': 31}
#Training models (g2).
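#For each outcome: fit the tuned classifier on the training split, build a SHAP explainer over the training features, and fit a Platt-style logistic regression on the validation-set predicted probabilities to calibrate the final output.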
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(**g2_y1_params)
g2_y1_model = rf
g2_y1_model = g2_y1_model.fit(g2_x1, g2_y1)
g2_y1_explainer = shap.Explainer(g2_y1_model.predict, g2_x1)
g2_y1_calib_probs = g2_y1_model.predict_proba(g2_x1_valid)
g2_y1_calib_model = LogisticRegression()
g2_y1_calib_model = g2_y1_calib_model.fit(g2_y1_calib_probs, g2_y1_valid)
from lightgbm import LGBMClassifier
lgb = LGBMClassifier(**g2_y2_params)
g2_y2_model = lgb
g2_y2_model = g2_y2_model.fit(g2_x2, g2_y2)
g2_y2_explainer = shap.Explainer(g2_y2_model.predict, g2_x2)
g2_y2_calib_probs = g2_y2_model.predict_proba(g2_x2_valid)
g2_y2_calib_model = LogisticRegression()
g2_y2_calib_model = g2_y2_calib_model.fit(g2_y2_calib_probs, g2_y2_valid)
from lightgbm import LGBMClassifier
lgb = LGBMClassifier(**g2_y3_params)
g2_y3_model = lgb
g2_y3_model = g2_y3_model.fit(g2_x3, g2_y3)
g2_y3_explainer = shap.Explainer(g2_y3_model.predict, g2_x3)
g2_y3_calib_probs = g2_y3_model.predict_proba(g2_x3_valid)
g2_y3_calib_model = LogisticRegression()
g2_y3_calib_model = g2_y3_calib_model.fit(g2_y3_calib_probs, g2_y3_valid)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(**g2_y4_params)
g2_y4_model = rf
g2_y4_model = g2_y4_model.fit(g2_x4, g2_y4)
g2_y4_explainer = shap.Explainer(g2_y4_model.predict, g2_x4)
g2_y4_calib_probs = g2_y4_model.predict_proba(g2_x4_valid)
g2_y4_calib_model = LogisticRegression()
g2_y4_calib_model = g2_y4_calib_model.fit(g2_y4_calib_probs, g2_y4_valid)
#Define output templates (g2).
g2_output_y1 = (
"""
The probability of 12-month survival:
{:.2f}%
"""
)
g2_output_y2 = (
"""
The probability of 24-month survival:
{:.2f}%
"""
)
g2_output_y3 = (
"""
The probability of 36-month survival:
{:.2f}%
"""
)
g2_output_y4 = (
"""
The probability of 60-month survival:
{:.2f}%
"""
)
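#Each template is filled via str.format in the predict functions below, e.g. g2_output_y1.format(87.5) renders "The probability of 12-month survival:" followed by "87.50%".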
#Define predict for y1 (g2).
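#The UI passes inputs in the same order as the training columns. Raw model probabilities are passed through the calibration model; OUTCOME = 1 is taken to indicate mortality within the time window, so survival is reported as 1 minus the calibrated class-1 probability. The same pattern is used for all predict functions.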
def g2_y1_predict(*args):
g2_df1 = pd.DataFrame([args], columns=g2_x1.columns)
pos_pred = g2_y1_model.predict_proba(g2_df1)
pos_pred = g2_y1_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g2_output_y1.format(prob * 100)
return output
#Define predict for y2 (g2).
def g2_y2_predict(*args):
g2_df2 = pd.DataFrame([args], columns=g2_x2.columns)
pos_pred = g2_y2_model.predict_proba(g2_df2)
pos_pred = g2_y2_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g2_output_y2.format(prob * 100)
return output
#Define predict for y3 (g2).
def g2_y3_predict(*args):
g2_df3 = pd.DataFrame([args], columns=g2_x3.columns)
pos_pred = g2_y3_model.predict_proba(g2_df3)
pos_pred = g2_y3_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g2_output_y3.format(prob * 100)
return output
#Define predict for y4 (g2).
def g2_y4_predict(*args):
g2_df4 = pd.DataFrame([args], columns=g2_x4.columns)
pos_pred = g2_y4_model.predict_proba(g2_df4)
pos_pred = g2_y4_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g2_output_y4.format(prob * 100)
return output
#Read training data.
g3_x1 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_12m_data_train.csv", use_auth_token = HF_TOKEN)
g3_x1 = pd.DataFrame(g3_x1['train'])
g3_x1 = g3_x1.iloc[:, 1:]
g3_x2 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_24m_data_train.csv", use_auth_token = HF_TOKEN)
g3_x2 = pd.DataFrame(g3_x2['train'])
g3_x2 = g3_x2.iloc[:, 1:]
g3_x3 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_36m_data_train.csv", use_auth_token = HF_TOKEN)
g3_x3 = pd.DataFrame(g3_x3['train'])
g3_x3 = g3_x3.iloc[:, 1:]
g3_x4 = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_60m_data_train.csv", use_auth_token = HF_TOKEN)
g3_x4 = pd.DataFrame(g3_x4['train'])
g3_x4 = g3_x4.iloc[:, 1:]
#Read validation data.
g3_x1_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_12m_data_valid.csv", use_auth_token = HF_TOKEN)
g3_x1_valid = pd.DataFrame(g3_x1_valid['train'])
g3_x1_valid = g3_x1_valid.iloc[:, 1:]
g3_x2_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_24m_data_valid.csv", use_auth_token = HF_TOKEN)
g3_x2_valid = pd.DataFrame(g3_x2_valid['train'])
g3_x2_valid = g3_x2_valid.iloc[:, 1:]
g3_x3_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_36m_data_valid.csv", use_auth_token = HF_TOKEN)
g3_x3_valid = pd.DataFrame(g3_x3_valid['train'])
g3_x3_valid = g3_x3_valid.iloc[:, 1:]
g3_x4_valid = load_dataset("mertkarabacak/G2G3-Glioma", data_files="g3_60m_data_valid.csv", use_auth_token = HF_TOKEN)
g3_x4_valid = pd.DataFrame(g3_x4_valid['train'])
g3_x4_valid = g3_x4_valid.iloc[:, 1:]
#Define feature names (g3).
g3_f1_names = list(g3_x1.columns)
g3_f1_names = [f1.replace('__', ' - ') for f1 in g3_f1_names]
g3_f1_names = [f1.replace('_', ' ') for f1 in g3_f1_names]
g3_f2_names = list(g3_x2.columns)
g3_f2_names = [f2.replace('__', ' - ') for f2 in g3_f2_names]
g3_f2_names = [f2.replace('_', ' ') for f2 in g3_f2_names]
g3_f3_names = list(g3_x3.columns)
g3_f3_names = [f3.replace('__', ' - ') for f3 in g3_f3_names]
g3_f3_names = [f3.replace('_', ' ') for f3 in g3_f3_names]
g3_f4_names = list(g3_x4.columns)
g3_f4_names = [f4.replace('__', ' - ') for f4 in g3_f4_names]
g3_f4_names = [f4.replace('_', ' ') for f4 in g3_f4_names]
#Prepare data for outcome 1 (g3).
g3_y1 = g3_x1.pop('OUTCOME')
g3_y1_valid = g3_x1_valid.pop('OUTCOME')
#Prepare data for outcome 2 (g3).
g3_y2 = g3_x2.pop('OUTCOME')
g3_y2_valid = g3_x2_valid.pop('OUTCOME')
#Prepare data for outcome 3 (g3).
g3_y3 = g3_x3.pop('OUTCOME')
g3_y3_valid = g3_x3_valid.pop('OUTCOME')
#Prepare data for outcome 4 (g3).
g3_y4 = g3_x4.pop('OUTCOME')
g3_y4_valid = g3_x4_valid.pop('OUTCOME')
#Assign hyperparameters (g3).
g3_y1_params = {'objective': 'binary', 'boosting_type': 'gbdt', 'lambda_l1': 0.0002964237261979634, 'lambda_l2': 0.0003914277818587476, 'num_leaves': 13, 'feature_fraction': 0.4598669689672836, 'bagging_fraction': 0.5293813102688141, 'bagging_freq': 3, 'min_child_samples': 93, 'metric': 'binary_logloss', 'verbosity': -1, 'random_state': 31}
g3_y2_params = {'criterion': 'entropy', 'max_depth': 53, 'n_estimators': 800, 'min_samples_leaf': 4, 'min_samples_split': 8, 'random_state': 31}
g3_y3_params = {'criterion': 'entropy', 'max_depth': 44, 'n_estimators': 1000, 'min_samples_leaf': 2, 'min_samples_split': 7, 'random_state': 31}
g3_y4_params = {'objective': 'binary', 'boosting_type': 'gbdt', 'lambda_l1': 1.9669754108753293e-06, 'lambda_l2': 0.0012741287968581684, 'num_leaves': 17, 'feature_fraction': 0.40138611969162713, 'bagging_fraction': 0.49839300388462815, 'bagging_freq': 4, 'min_child_samples': 91, 'metric': 'binary_logloss', 'verbosity': -1, 'random_state': 31}
#Training models (g3).
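#Same fit / SHAP explainer / calibration pipeline as for grade II, applied to the grade III cohorts.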
from lightgbm import LGBMClassifier
lgb = LGBMClassifier(**g3_y1_params)
g3_y1_model = lgb
g3_y1_model = g3_y1_model.fit(g3_x1, g3_y1)
g3_y1_explainer = shap.Explainer(g3_y1_model.predict, g3_x1)
g3_y1_calib_probs = g3_y1_model.predict_proba(g3_x1_valid)
g3_y1_calib_model = LogisticRegression()
g3_y1_calib_model = g3_y1_calib_model.fit(g3_y1_calib_probs, g3_y1_valid)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(**g3_y2_params)
g3_y2_model = rf
g3_y2_model = g3_y2_model.fit(g3_x2, g3_y2)
g3_y2_explainer = shap.Explainer(g3_y2_model.predict, g3_x2)
g3_y2_calib_probs = g3_y2_model.predict_proba(g3_x2_valid)
g3_y2_calib_model = LogisticRegression()
g3_y2_calib_model = g3_y2_calib_model.fit(g3_y2_calib_probs, g3_y2_valid)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(**g3_y3_params)
g3_y3_model = rf
g3_y3_model = g3_y3_model.fit(g3_x3, g3_y3)
g3_y3_explainer = shap.Explainer(g3_y3_model.predict, g3_x3)
g3_y3_calib_probs = g3_y3_model.predict_proba(g3_x3_valid)
g3_y3_calib_model = LogisticRegression()
g3_y3_calib_model = g3_y3_calib_model.fit(g3_y3_calib_probs, g3_y3_valid)
from lightgbm import LGBMClassifier
lgb = LGBMClassifier(**g3_y4_params)
g3_y4_model = lgb
g3_y4_model = g3_y4_model.fit(g3_x4, g3_y4)
g3_y4_explainer = shap.Explainer(g3_y4_model.predict, g3_x4)
g3_y4_calib_probs = g3_y4_model.predict_proba(g3_x4_valid)
g3_y4_calib_model = LogisticRegression()
g3_y4_calib_model = g3_y4_calib_model.fit(g3_y4_calib_probs, g3_y4_valid)
#Define output templates (g3).
g3_output_y1 = (
"""
The probability of 12-month survival:
{:.2f}%
"""
)
g3_output_y2 = (
"""
The probability of 24-month survival:
{:.2f}%
"""
)
g3_output_y3 = (
"""
The probability of 36-month survival:
{:.2f}%
"""
)
g3_output_y4 = (
"""
The probability of 60-month survival:
{:.2f}%
"""
)
#Define predict for y1 (g3).
def g3_y1_predict(*args):
g3_df1 = pd.DataFrame([args], columns=g3_x1.columns)
pos_pred = g3_y1_model.predict_proba(g3_df1)
pos_pred = g3_y1_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g3_output_y1.format(prob * 100)
return output
#Define predict for y2 (g3).
def g3_y2_predict(*args):
g3_df2 = pd.DataFrame([args], columns=g3_x2.columns)
pos_pred = g3_y2_model.predict_proba(g3_df2)
pos_pred = g3_y2_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g3_output_y2.format(prob * 100)
return output
#Define predict for y3 (g3).
def g3_y3_predict(*args):
g3_df3 = pd.DataFrame([args], columns=g3_x3.columns)
pos_pred = g3_y3_model.predict_proba(g3_df3)
pos_pred = g3_y3_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g3_output_y3.format(prob * 100)
return output
#Define predict for y4 (g3).
def g3_y4_predict(*args):
g3_df4 = pd.DataFrame([args], columns=g3_x4.columns)
pos_pred = g3_y4_model.predict_proba(g3_df4)
pos_pred = g3_y4_calib_model.predict_proba(pos_pred)
prob = pos_pred[0][1]
prob = 1-prob
output = g3_output_y4.format(prob * 100)
return output
#Define function for wrapping feature labels.
def wrap_labels(ax, width, break_long_words=False):
labels = []
for label in ax.get_yticklabels():
text = label.get_text()
labels.append(textwrap.fill(text, width=width, break_long_words=break_long_words))
ax.set_yticklabels(labels, rotation=0)
#Define interpret for y1 (g2).
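#Each interpret function computes SHAP values for the single submitted case and plots the absolute contributions of the top 10 features as a bar chart.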
def g2_y1_interpret(*args):
g2_df1 = pd.DataFrame([args], columns=g2_x1.columns)
g2_shap_values1 = g2_y1_explainer(g2_df1).values
g2_shap_values1 = np.abs(g2_shap_values1)
shap.bar_plot(g2_shap_values1[0], max_display = 10, show = False, feature_names = g2_f1_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y2 (g2).
def g2_y2_interpret(*args):
g2_df2 = pd.DataFrame([args], columns=g2_x2.columns)
g2_shap_values2 = g2_y2_explainer(g2_df2).values
g2_shap_values2 = np.abs(g2_shap_values2)
shap.bar_plot(g2_shap_values2[0], max_display = 10, show = False, feature_names = g2_f2_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y3 (g2).
def g2_y3_interpret(*args):
g2_df3 = pd.DataFrame([args], columns=g2_x3.columns)
g2_shap_values3 = g2_y3_explainer(g2_df3).values
g2_shap_values3 = np.abs(g2_shap_values3)
shap.bar_plot(g2_shap_values3[0], max_display = 10, show = False, feature_names = g2_f3_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y4 (g2).
def g2_y4_interpret(*args):
g2_df4 = pd.DataFrame([args], columns=g2_x4.columns)
g2_shap_values4 = g2_y4_explainer(g2_df4).values
g2_shap_values4 = np.abs(g2_shap_values4)
shap.bar_plot(g2_shap_values4[0], max_display = 10, show = False, feature_names = g2_f4_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y1 (g3).
def g3_y1_interpret(*args):
g3_df1 = pd.DataFrame([args], columns=g3_x1.columns)
g3_shap_values1 = g3_y1_explainer(g3_df1).values
g3_shap_values1 = np.abs(g3_shap_values1)
shap.bar_plot(g3_shap_values1[0], max_display = 10, show = False, feature_names = g3_f1_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y2 (g3).
def g3_y2_interpret(*args):
g3_df2 = pd.DataFrame([args], columns=g3_x2.columns)
g3_shap_values2 = g3_y2_explainer(g3_df2).values
g3_shap_values2 = np.abs(g3_shap_values2)
shap.bar_plot(g3_shap_values2[0], max_display = 10, show = False, feature_names = g3_f2_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y3 (g3).
def g3_y3_interpret(*args):
g3_df3 = pd.DataFrame([args], columns=g3_x3.columns)
g3_shap_values3 = g3_y3_explainer(g3_df3).values
g3_shap_values3 = np.abs(g3_shap_values3)
shap.bar_plot(g3_shap_values3[0], max_display = 10, show = False, feature_names = g3_f3_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
#Define interpret for y4 (g3).
def g3_y4_interpret(*args):
g3_df4 = pd.DataFrame([args], columns=g3_x4.columns)
g3_shap_values4 = g3_y4_explainer(g3_df4).values
g3_shap_values4 = np.abs(g3_shap_values4)
shap.bar_plot(g3_shap_values4[0], max_display = 10, show = False, feature_names = g3_f4_names)
fig = plt.gcf()
ax = plt.gca()
wrap_labels(ax, 20)
ax.figure
plt.tight_layout()
fig.set_figheight(7)
fig.set_figwidth(9)
plt.xlabel("SHAP value (impact on model output)", fontsize =12, fontweight = 'heavy', labelpad = 8)
plt.tick_params(axis="y",direction="out", labelsize = 12)
plt.tick_params(axis="x",direction="out", labelsize = 12)
return fig
with gr.Blocks(title = "NCDB-G2G3 Glioma") as demo:
gr.Markdown(
"""
**NOT FOR CLINICAL USE**

**Grade II and III Glioma Survival Outcomes Prediction Tool**

This web application should not be used to guide any clinical decisions.
"""
)
gr.Markdown(
"""
**Model Performances for Grade II Gliomas**

| Outcome | Algorithm | Sensitivity | Specificity | Accuracy | AUPRC | AUROC | Brier Score |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 12-Month Mortality | Random Forest | 0.838 (0.822 - 0.854) | 0.814 (0.797 - 0.831) | 0.816 (0.799 - 0.833) | 0.383 (0.361 - 0.405) | 0.888 (0.856 - 0.912) | 0.054 (0.044 - 0.064) |
| 24-Month Mortality | LightGBM | 0.712 (0.692 - 0.732) | 0.839 (0.822 - 0.856) | 0.823 (0.806 - 0.840) | 0.523 (0.500 - 0.546) | 0.859 (0.804 - 0.867) | 0.054 (0.044 - 0.064) |
| 36-Month Mortality | LightGBM | 0.653 (0.631 - 0.675) | 0.836 (0.819 - 0.853) | 0.803 (0.785 - 0.821) | 0.564 (0.541 - 0.587) | 0.813 (0.777 - 0.835) | 0.111 (0.096 - 0.126) |
| 60-Month Mortality | Random Forest | 0.838 (0.822 - 0.854) | 0.814 (0.797 - 0.831) | 0.816 (0.799 - 0.833) | 0.383 (0.361 - 0.405) | 0.888 (0.856 - 0.912) | 0.054 (0.044 - 0.064) |
"""
)
gr.Markdown(
"""
"""
)
gr.Markdown(
"""
**Model Performances for Grade III Gliomas**

| Outcome | Algorithm | Sensitivity | Specificity | Accuracy | AUPRC | AUROC | Brier Score |
| --- | --- | --- | --- | --- | --- | --- | --- |
| 12-Month Mortality | LightGBM | 0.768 (0.750 - 0.786) | 0.811 (0.795 - 0.827) | 0.800 (0.783 - 0.817) | 0.725 (0.706 - 0.744) | 0.876 (0.857 - 0.889) | 0.119 (0.106 - 0.132) |
| 24-Month Mortality | Random Forest | 0.722 (0.703 - 0.741) | 0.810 (0.794 - 0.826) | 0.796 (0.779 - 0.813) | 0.775 (0.758 - 0.792) | 0.855 (0.839 - 0.870) | 0.153 (0.138 - 0.168) |
| 36-Month Mortality | Random Forest | 0.763 (0.745 - 0.781) | 0.827 (0.811 - 0.843) | 0.874 (0.860 - 0.888) | 0.794 (0.777 - 0.811) | 0.878 (0.857 - 0.885) | 0.146 (0.131 - 0.161) |
| 60-Month Mortality | LightGBM | 0.816 (0.798 - 0.834) | 0.748 (0.728 - 0.768) | 0.930 (0.918 - 0.942) | 0.795 (0.776 - 0.814) | 0.860 (0.834 - 0.870) | 0.142 (0.126 - 0.158) |
"""
)
gr.Markdown(
"""
"""
)
with gr.Tab('Grade II'):
with gr.Row():
with gr.Column():
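#Dropdowns use type='index', so each selection is passed to the model as the integer position of the chosen option; the option order must therefore match the encoding used when the training data were prepared.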
Age = gr.Slider(label="Age", minimum = 18, maximum = 99, step = 1, value = 55)
Sex = gr.Dropdown(label = "Sex", choices = ['Male', 'Female'], type = 'index', value = 'Male')
Race = gr.Dropdown(label = "Race", choices = ['White', 'Black', 'Asian Indian or Pakistani', 'Chinese', 'Filipino', 'American Indian, Aleutian, or Eskimo', 'Vietnamese', 'Korean', 'Other or Unknown'], type = 'index', value = 'White')
Hispanic_Ethnicity = gr.Dropdown(label = "Hispanic Ethnicity", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
Primary_Payor = gr.Dropdown(label = "Primary Payor", choices = ['Private insurance', 'Medicare', 'Medicaid', 'Other government', 'Not insured', 'Unknown'], type = 'index', value = 'Private insurance')
Facility_Type = gr.Dropdown(label = "Facility Type", choices = ['Academic/Research Program', 'Comprehensive Community Cancer Program', 'Integrated Network Cancer Program', 'Community Cancer Program', 'Other or Unknown'], type = 'index', value = 'Academic/Research Program')
Facility_Location = gr.Dropdown(label = "Facility Location", choices = ['South Atlantic', 'East North Central', 'Middle Atlantic', 'Pacific', 'West South Central', 'West North Central', 'East South Central', 'New England', 'Mountain', 'Unknown or Other'], type = 'index', value = 'South Atlantic')
CharlsonDeyo_Score = gr.Dropdown(label = "Charlson-Deyo Score", choices = ['0', '1', '2', '3 or greater'], type = 'index', value = '0')
Karnofsky_Performance_Scale = gr.Dropdown(label = "Karnofsky Performance Scale", choices = ['KPS 0-20', 'KPS 21-40', 'KPS 41-60', 'KPS 61-80', 'KPS 81-100', 'Unknown'], type = 'index', value = 'KPS 81-100')
Laterality = gr.Dropdown(label = "Laterality", choices = ['Right', 'Left', 'Bilateral', 'Midline', 'Unknown'], type = 'index', value = 'Right')
Tumor_Localization = gr.Dropdown(label = "Tumor Localization", choices = ['Frontal lobe', 'Temporal lobe', 'Parietal lobe', 'Occipital lobe', 'Overlapping', 'Intraventricular', 'Cerebellum', 'Brain stem', 'Unknown'], type = 'index', value = 'Frontal lobe')
Focality = gr.Dropdown(label = "Focality", choices = ['Unifocal', 'Multifocal', 'Unknown'], type = 'index', value = 'Unifocal')
Diagnostic_Biopsy = gr.Dropdown(label = "Diagnostic Biopsy", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
Tumor_Size = gr.Dropdown(label = "Tumor Size", choices = ['< 2 cm', '2 - 3.9 cm', '4 - 5.9 cm', '6 - 7.9 cm', '8 - 9.9 cm', '10 - 11.9 cm', '12 - 13.9 cm', '14 - 15.9 cm', '16 - 17.9 cm', '18 - 19.9 cm', '> 20 cm', 'Unknown'], type = 'index', value = '< 2 cm')
Histology = gr.Dropdown(label = "Histology", choices = ['Pilomyxoid astrocytoma', 'Diffuse astrocytoma', 'Pleomorphic astrocytoma', 'Oligodendroglioma', 'Oligoastrocytoma'], type = 'index', value = 'Diffuse astrocytoma')
CoDeletion_1p19q = gr.Dropdown(label = "1p19q Co-Deletion", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
MGMT_Methylation = gr.Dropdown(label = "MGMT Methylation", choices = ['Unmethylated', 'Methylated', 'Unknown'], type = 'index', value = 'Unmethylated')
Ki67_Labeling_Index = gr.Dropdown(label = 'Ki-67 Labeling Index', choices = ['0-20%', '21-40%', '41-60%', '61-80%', '81-100%', 'Normal (no percentage available)', 'Slightly elevated (no percentage available)', 'Elevated (no percentage available)', 'Unknown'], type = 'index', value = '0-20%')
Extent_of_Resection = gr.Dropdown(label = "Extent of Resection", choices = ['No resective surgery was performed', 'Gross total resection', 'Subtotal resection', 'Unknown'], type = 'index', value = 'Gross total resection')
Radiation_Treatment = gr.Dropdown(label = "Radiation Treatment", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'Yes')
Chemotherapy = gr.Dropdown(label = "Chemotherapy", choices = ['No', 'Yes (single-agent chemotherapy)', 'Yes (multi-agent chemotherapy)', 'Yes (details unknown)', 'Unknown'], type = 'index', value = 'No')
Immunotherapy = gr.Dropdown(label = "Immunotherapy", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
with gr.Column():
with gr.Box():
gr.Markdown(
"""
**12-Month Survival**

This model uses the Random Forest algorithm.
"""
)
with gr.Row():
y1_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label1 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y1_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot1 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**24-Month Survival**

This model uses the LightGBM algorithm.
"""
)
with gr.Row():
y2_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label2 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y2_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot2 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**36-Month Survival**

This model uses the LightGBM algorithm.
"""
)
with gr.Row():
y3_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label3 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y3_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot3 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**60-Month Survival**

This model uses the Random Forest algorithm.
"""
)
with gr.Row():
y4_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label4 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y4_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot4 = gr.Plot()
gr.Markdown(
"""
"""
)
y1_predict_btn.click(
g2_y1_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label1]
)
y2_predict_btn.click(
g2_y2_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label2]
)
y3_predict_btn.click(
g2_y3_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label3]
)
y4_predict_btn.click(
g2_y4_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label4]
)
y1_interpret_btn.click(
g2_y1_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot1],
)
y2_interpret_btn.click(
g2_y2_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot2],
)
y3_interpret_btn.click(
g2_y3_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot3],
)
y4_interpret_btn.click(
g2_y4_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot4],
)
with gr.Tab('Grade III'):
with gr.Row():
with gr.Column():
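#Same input widgets and type='index' encoding as the Grade II tab, with grade III-specific histology options.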
Age = gr.Slider(label="Age", minimum = 18, maximum = 99, step = 1, value = 55)
Sex = gr.Dropdown(label = "Sex", choices = ['Male', 'Female'], type = 'index', value = 'Male')
Race = gr.Dropdown(label = "Race", choices = ['White', 'Black', 'Asian Indian or Pakistani', 'Chinese', 'Filipino', 'American Indian, Aleutian, or Eskimo', 'Vietnamese', 'Korean', 'Other or Unknown'], type = 'index', value = 'White')
Hispanic_Ethnicity = gr.Dropdown(label = "Hispanic Ethnicity", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
Primary_Payor = gr.Dropdown(label = "Primary Payor", choices = ['Private insurance', 'Medicare', 'Medicaid', 'Other government', 'Not insured', 'Unknown'], type = 'index', value = 'Private insurance')
Facility_Type = gr.Dropdown(label = "Facility Type", choices = ['Academic/Research Program', 'Comprehensive Community Cancer Program', 'Integrated Network Cancer Program', 'Community Cancer Program', 'Other or Unknown'], type = 'index', value = 'Academic/Research Program')
Facility_Location = gr.Dropdown(label = "Facility Location", choices = ['South Atlantic', 'East North Central', 'Middle Atlantic', 'Pacific', 'West South Central', 'West North Central', 'East South Central', 'New England', 'Mountain', 'Unknown or Other'], type = 'index', value = 'South Atlantic')
CharlsonDeyo_Score = gr.Dropdown(label = "Charlson-Deyo Score", choices = ['0', '1', '2', '3 or greater'], type = 'index', value = '0')
Karnofsky_Performance_Scale = gr.Dropdown(label = "Karnofsky Performance Scale", choices = ['KPS 0-20', 'KPS 21-40', 'KPS 41-60', 'KPS 61-80', 'KPS 81-100', 'Unknown'], type = 'index', value = 'KPS 81-100')
Laterality = gr.Dropdown(label = "Laterality", choices = ['Right', 'Left', 'Bilateral', 'Midline', 'Unknown'], type = 'index', value = 'Right')
Tumor_Localization = gr.Dropdown(label = "Tumor Localization", choices = ['Frontal lobe', 'Temporal lobe', 'Parietal lobe', 'Occipital lobe', 'Overlapping', 'Intraventricular', 'Cerebellum', 'Brain stem', 'Unknown'], type = 'index', value = 'Frontal lobe')
Focality = gr.Dropdown(label = "Focality", choices = ['Unifocal', 'Multifocal', 'Unknown'], type = 'index', value = 'Unifocal')
Diagnostic_Biopsy = gr.Dropdown(label = "Diagnostic Biopsy", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
Tumor_Size = gr.Dropdown(label = "Tumor Size", choices = ['< 2 cm', '2 - 3.9 cm', '4 - 5.9 cm', '6 - 7.9 cm', '8 - 9.9 cm', '10 - 11.9 cm', '12 - 13.9 cm', '14 - 15.9 cm', '16 - 17.9 cm', '18 - 19.9 cm', '> 20 cm', 'Unknown'], type = 'index', value = '< 2 cm')
Histology = gr.Dropdown(label = "Histology", choices = ['Anaplastic astrocytoma', 'Anaplastic oligodendroglioma', 'Anaplastic oligoastrocytoma'], type = 'index', value = 'Anaplastic astrocytoma')
CoDeletion_1p19q = gr.Dropdown(label = "1p19q Co-Deletion", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
MGMT_Methylation = gr.Dropdown(label = "MGMT Methylation", choices = ['Unmethylated', 'Methylated', 'Unknown'], type = 'index', value = 'Unmethylated')
Ki67_Labeling_Index = gr.Dropdown(label = 'Ki-67 Labeling Index', choices = ['0-20%', '21-40%', '41-60%', '61-80%', '81-100%', 'Normal (no percentage available)', 'Slightly elevated (no percentage available)', 'Elevated (no percentage available)', 'Unknown'], type = 'index', value = '0-20%')
Extent_of_Resection = gr.Dropdown(label = "Extent of Resection", choices = ['No resective surgery was performed', 'Gross total resection', 'Subtotal resection', 'Unknown'], type = 'index', value = 'Gross total resection')
Radiation_Treatment = gr.Dropdown(label = "Radiation Treatment", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'Yes')
Chemotherapy = gr.Dropdown(label = "Chemotherapy", choices = ['No', 'Yes (single-agent chemotherapy)', 'Yes (multi-agent chemotherapy)', 'Yes (details unknown)', 'Unknown'], type = 'index', value = 'No')
Immunotherapy = gr.Dropdown(label = "Immunotherapy", choices = ['No', 'Yes', 'Unknown'], type = 'index', value = 'No')
with gr.Column():
with gr.Box():
gr.Markdown(
"""
**12-Month Survival**

This model uses the LightGBM algorithm.
"""
)
with gr.Row():
y1_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label1 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y1_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot1 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**24-Month Survival**

This model uses the Random Forest algorithm.
"""
)
with gr.Row():
y2_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label2 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y2_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot2 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**36-Month Survival**

This model uses the Random Forest algorithm.
"""
)
with gr.Row():
y3_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label3 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y3_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot3 = gr.Plot()
gr.Markdown(
"""
"""
)
with gr.Box():
gr.Markdown(
"""
**60-Month Survival**

This model uses the LightGBM algorithm.
"""
)
with gr.Row():
y4_predict_btn = gr.Button(value="Predict")
gr.Markdown(
"""
"""
)
label4 = gr.Markdown()
gr.Markdown(
"""
"""
)
with gr.Row():
y4_interpret_btn = gr.Button(value="Explain")
gr.Markdown(
"""
"""
)
plot4 = gr.Plot()
gr.Markdown(
"""
"""
)
y1_predict_btn.click(
g3_y1_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label1]
)
y2_predict_btn.click(
g3_y2_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label2]
)
y3_predict_btn.click(
g3_y3_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label3]
)
y4_predict_btn.click(
g3_y4_predict,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [label4]
)
y1_interpret_btn.click(
g3_y1_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot1],
)
y2_interpret_btn.click(
g3_y2_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot2],
)
y3_interpret_btn.click(
g3_y3_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot3],
)
y4_interpret_btn.click(
g3_y4_interpret,
inputs = [Facility_Type,Facility_Location,Age,Sex,Race,Hispanic_Ethnicity,Primary_Payor,CharlsonDeyo_Score,Histology,Tumor_Localization,Laterality,Diagnostic_Biopsy,Ki67_Labeling_Index,Karnofsky_Performance_Scale,MGMT_Methylation,Focality,Tumor_Size,Chemotherapy,Immunotherapy,CoDeletion_1p19q,Extent_of_Resection,Radiation_Treatment],
outputs = [plot4],
)
gr.Markdown(
"""
**Disclaimer**

The data utilized for this tool is sourced from the Commission on Cancer (CoC) of the American College of Surgeons and the American Cancer Society. These institutions, however, have not verified the information and are not responsible for the statistical validity of the data analysis or the conclusions drawn by the authors. This predictive tool, available on this webpage, is designed to provide general health information only and is not a substitute for professional medical advice, diagnosis, or treatment. It is strongly recommended that users consult their own healthcare provider for any health-related concerns or issues. The authors make no warranties or representations, express or implied, regarding the accuracy, timeliness, relevance, or utility of the information contained in this tool. The health information in the prediction tool is subject to change and can be affected by various confounders; it may therefore be outdated, incomplete, or incorrect. No doctor-patient relationship is created by using this prediction tool, and the authors have not validated its content. The authors do not record any specific user information or initiate contact with users. Before making any healthcare decisions, or taking or refraining from any action based on the information in this prediction tool, it is advisable to seek professional advice from a healthcare provider. By using the prediction tool, users acknowledge and agree that neither the authors nor any other party will be liable for any decisions made, or actions taken or not taken, as a result of the information provided herein.

By using this tool, you accept all of the above terms.
"""
)
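#Launch the Gradio app.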
demo.launch()