Added files
- .gitignore +1 -0
- FIFA_Processed_Data.csv +0 -0
- FIFA_Standardized_Data.csv +0 -0
- FIFA_Standardized_Data.joblib +3 -0
- app.ipynb +0 -0
- app.py +88 -0
- requirements.txt +7 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+.venv/
FIFA_Processed_Data.csv
ADDED
The diff for this file is too large to render. See raw diff.
FIFA_Standardized_Data.csv
ADDED
The diff for this file is too large to render. See raw diff.
FIFA_Standardized_Data.joblib
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1e5a81d1da3e7e335ef9238fb3e7258e29f1523d1c9b017abd681e690be5aa6
+size 2332218
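The three added lines are a Git LFS pointer rather than the binary itself: the spec version, the SHA-256 of the real content, and its size in bytes (about 2.3 MB). A minimal, hypothetical sketch of reading the artifact, assuming it is a pandas DataFrame that already carries the cluster and embedding columns app.py expects:

import joblib

# Assumption: `git lfs pull` has been run, so the real .joblib file (not the pointer) is on disk.
df = joblib.load("FIFA_Standardized_Data.joblib")
# Columns expected downstream by app.py (assumed, per its load_clustered_data check):
print(df[["DBSCAN_Cluster", "PCA1", "PCA2", "TSNE1", "TSNE2"]].head())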
app.ipynb
ADDED
The diff for this file is too large to render. See raw diff.
app.py
ADDED
@@ -0,0 +1,88 @@
+import streamlit as st
+import joblib
+import pandas as pd
+import os
+import matplotlib.pyplot as plt
+import seaborn as sns
+from sklearn.decomposition import PCA
+from datasets import load_dataset
+
+def load_clustered_data():
+    df = joblib.load("FIFA_Standardized_Data.joblib")
+
+    # Ensure required clustering columns exist
+    required_columns = ["DBSCAN_Cluster", "PCA1", "PCA2", "TSNE1", "TSNE2"]
+    missing_columns = [col for col in required_columns if col not in df.columns]
+
+    if missing_columns:
+        st.error(f"⚠️ Missing columns in dataset: {', '.join(missing_columns)}. Please re-run clustering and save the dataset.")
+        return None
+
+    return df
+
+def load_fifa_dataset():
+    dataset = load_dataset("Ci-Dave/FIFA2019")
+    df = pd.DataFrame(dataset["train"])
+    df.rename(columns={"ShortPassing": "Passing", "StandingTackle": "Defending", "Strength": "Physical"}, inplace=True)
+    return df
+
+def home_page():
+    st.title("⚽ FIFA 2019 Clustering Analysis")
+    st.write("""
+    This Streamlit app demonstrates unsupervised learning using **clustering techniques** on the FIFA 2019 dataset.
+
+    **Key Features:**
+    - Displays the dataset
+    - Allows user interaction for visualizing clusters
+    - Uses models like **DBSCAN, PCA, and t-SNE**
+    """)
+
+def dataset_page():
+    st.title("📊 FIFA 2019 Dataset")
+    df = load_fifa_dataset()
+    st.dataframe(df)
+
+def visualization_page():
+    st.title("📈 Clustering Visualization")
+    df = load_clustered_data()
+
+    if df is None:
+        return  # Stop execution if dataset is missing required columns
+
+    clustering_algorithms = ["DBSCAN", "PCA", "t-SNE"]
+    selected_algo = st.selectbox("Choose a Clustering Algorithm:", clustering_algorithms)
+
+    if selected_algo == "DBSCAN":
+        st.subheader("DBSCAN Clustering")
+        plt.figure(figsize=(8,5))
+        sns.scatterplot(x=df["PCA1"], y=df["PCA2"], hue=df["DBSCAN_Cluster"], palette="coolwarm")
+        st.pyplot(plt)
+
+    elif selected_algo == "PCA":
+        st.subheader("PCA Visualization")
+        pca = PCA(n_components=2)
+        pca_result = pca.fit_transform(df.iloc[:, :-1])
+        plt.scatter(pca_result[:, 0], pca_result[:, 1], c=df["DBSCAN_Cluster"], cmap="plasma")
+        plt.xlabel("PCA Component 1")
+        plt.ylabel("PCA Component 2")
+        st.pyplot(plt)
+
+    elif selected_algo == "t-SNE":
+        st.subheader("t-SNE Visualization")
+        plt.figure(figsize=(8,5))
+        sns.scatterplot(x=df["TSNE1"], y=df["TSNE2"], hue=df["DBSCAN_Cluster"], palette="coolwarm")
+        st.pyplot(plt)
+
+def main():
+    st.sidebar.title("Navigation")
+    pages = {
+        "🏠 Home": home_page,
+        "📊 Dataset": dataset_page,
+        "📈 Visualizations": visualization_page,
+    }
+
+    choice = st.sidebar.radio("Go to", list(pages.keys()))
+    pages[choice]()
+
+if __name__ == "__main__":
+    main()
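app.py only consumes precomputed DBSCAN_Cluster, PCA1/PCA2 and TSNE1/TSNE2 columns; the clustering itself presumably happens in app.ipynb, whose diff is not rendered above. A rough sketch of how such columns could be produced, with illustrative parameters that are assumptions rather than values taken from this repository:

import joblib
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# Assumption: the standardized CSV holds only numeric, already-scaled feature columns.
features = pd.read_csv("FIFA_Standardized_Data.csv")

df = features.copy()
df["DBSCAN_Cluster"] = DBSCAN(eps=0.5, min_samples=5).fit_predict(features)   # illustrative parameters
df[["PCA1", "PCA2"]] = PCA(n_components=2).fit_transform(features)
df[["TSNE1", "TSNE2"]] = TSNE(n_components=2, random_state=42).fit_transform(features)

# Persist the clustered frame as the LFS-tracked artifact that app.py reloads.
joblib.dump(df, "FIFA_Standardized_Data.joblib")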
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+pandas
+streamlit
+numpy
+matplotlib
+seaborn
+scikit-learn
+joblib
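With these dependencies installed (for example via pip install -r requirements.txt), the app should start locally with streamlit run app.py. Two caveats worth noting: app.py also imports the Hugging Face datasets package, which is not listed here and would need to be installed separately, and the .joblib artifact is stored through Git LFS, so git lfs pull is needed before load_clustered_data() can read it.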