import numpy as np
import pandas as pd

data = pd.read_csv("heart.csv")
data.info()

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Define features and target
X = data.drop('target', axis=1)
y = data['target']

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

from keras.models import Sequential
from keras.layers import Dense

# Define the FNN model
model = Sequential()
model.add(Dense(32, activation='relu', input_shape=(X_train.shape[1],)))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=50, batch_size=10, validation_split=0.2)

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_curve, auc, roc_auc_score

# Logistic Regression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)

from sklearn.tree import DecisionTreeClassifier

# Decision Tree
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)

from sklearn.ensemble import RandomForestClassifier

# Random Forest
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
y_pred = rf.predict(X_test)

from sklearn.svm import SVC

# Support Vector Machine
svm = SVC(probability=True)
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)

from xgboost import XGBClassifier
import xgboost as xgb

# XGBoost
xgboost = xgb.XGBClassifier(use_label_encoder=False, eval_metric='logloss')
xgboost.fit(X_train, y_train)
y_pred = xgboost.predict(X_test)
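# The metrics imported above (accuracy_score, roc_auc_score, confusion_matrix, ...)
# are never applied in the original script. The block below is an illustrative
# addition, not part of the original pipeline: a minimal sketch that scores each
# fitted classifier on the held-out test set so the models can be compared. It
# assumes every estimator above has already been fit on the scaled training data.
for name, clf in [('Logistic Regression', log_reg), ('Decision Tree', dt),
                  ('Random Forest', rf), ('SVM', svm), ('XGBoost', xgboost)]:
    test_pred = clf.predict(X_test)
    test_proba = clf.predict_proba(X_test)[:, 1]
    print(f"{name}: accuracy={accuracy_score(y_test, test_pred):.3f}, "
          f"ROC AUC={roc_auc_score(y_test, test_proba):.3f}")
    print(confusion_matrix(y_test, test_pred))

# The Keras model outputs probabilities, so threshold at 0.5 before scoring.
fnn_proba = model.predict(X_test).ravel()
fnn_pred = (fnn_proba > 0.5).astype("int32")
print(f"FNN: accuracy={accuracy_score(y_test, fnn_pred):.3f}, "
      f"ROC AUC={roc_auc_score(y_test, fnn_proba):.3f}")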
import gradio as gr

def predict(input_age, input_sex, input_cp, input_trestbps, input_chol, input_fbs,
            input_restecg, input_thalach, input_exang, input_oldpeak, input_slope,
            input_ca, input_thal):
    # Input validation (add checks for other inputs as needed)
    if not (0 <= input_age <= 120):
        return {"error": "Age must be between 0 and 120."}

    # Prepare input data
    input_data = [[input_age, input_sex, input_cp, input_trestbps, input_chol, input_fbs,
                   input_restecg, input_thalach, input_exang, input_oldpeak, input_slope,
                   input_ca, input_thal]]
    input_scaled = scaler.transform(input_data)

    # Predictions
    try:
        predictions = {
            'Logistic Regression': log_reg.predict(input_scaled)[0],
            'Random Forest': rf.predict(input_scaled)[0],
            'Decision Tree': dt.predict(input_scaled)[0],
            'SVM': svm.predict(input_scaled)[0],
            'XGBoost': xgboost.predict(input_scaled)[0],
            'FNN': (model.predict(input_scaled) > 0.5).astype("int32")[0][0]
        }
        for key in predictions:
            predictions[key] = 'Heart Disease' if predictions[key] == 1 else 'No Heart Disease'
        return predictions
    except Exception as e:
        return {"error": str(e)}

# Create Gradio interface
input_components = [
    gr.Number(label='Age'),
    gr.Number(label='Sex'),
    gr.Number(label='Chest Pain Type (cp)'),
    gr.Number(label='Resting Blood Pressure (trestbps)'),
    gr.Number(label='Cholesterol Level (chol)'),
    gr.Number(label='Fasting Blood Sugar (fbs)'),
    gr.Number(label='Resting Electrocardiographic Results (restecg)'),
    gr.Number(label='Maximum Heart Rate Achieved (thalach)'),
    gr.Number(label='Exercise Induced Angina (exang)'),
    gr.Number(label='Old Peak (oldpeak)'),
    gr.Number(label='Slope of the Peak Exercise ST Segment (slope)'),
    gr.Number(label='Number of Major Vessels Colored by Fluoroscopy (ca)'),
    gr.Number(label='Thalassemia (thal)'),
]

# Launch the Gradio interface
gr.Interface(fn=predict,
             inputs=input_components,
             outputs='json',
             title='Heart Disease Prediction',
             description='Enter the features to predict the presence of heart disease.').launch()
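# Optional sanity check (illustrative addition, not part of the original script):
# call predict() directly with one hand-typed feature vector before launching the UI.
# The values below are arbitrary placeholders in the dataset's column order
# (age, sex, cp, trestbps, chol, fbs, restecg, thalach, exang, oldpeak, slope, ca, thal),
# not real patient data.
# sample = predict(55, 1, 0, 130, 250, 0, 1, 150, 0, 1.0, 2, 0, 2)
# print(sample)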