import streamlit as st
import pandas as pd
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
from itertools import combinations
import re
from functools import cache
from io import StringIO
from yall import create_yall
import plotly.graph_objs as go

def calculate_pages(df, items_per_page):
    """Return the number of pages needed to show df at items_per_page rows per page."""
    return -(-len(df) // items_per_page)  # Ceiling division, same as math.ceil(len(df) / items_per_page)


# Function to get model info from Hugging Face API using caching
@cache
def cached_model_info(api, model):
    try:
        return api.model_info(repo_id=str(model))
    except (RepositoryNotFoundError, RevisionNotFoundError):
        return None
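# Note: functools.cache keys on the (api, model) argument pair; HfApi instances hash
# by object identity, so cache hits require reusing the same api object across calls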

# Function to get model info from DataFrame and update it with likes and tags
@st.cache_data
def get_model_info(df):
    api = HfApi()

    # Initialize new columns for likes and tags
    df['Likes'] = None
    df['Tags'] = None

    for index, row in df.iterrows():
        model_info = cached_model_info(api, row['Model'].strip())
        if model_info:
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        else:
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''
    return df
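# Note: st.cache_data hashes the input DataFrame, so reruns with an unchanged
# leaderboard skip the Hugging Face API calls entirely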

# Function to convert markdown table to DataFrame and extract Hugging Face URLs
def convert_markdown_table_to_dataframe(md_content):
    """
    Converts markdown table to Pandas DataFrame, handling special characters and links,
    extracts Hugging Face URLs, and adds them to a new column.
    """
    # Strip the leading and trailing pipe character from every line
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Parse the pipe-separated content into a DataFrame
    df = pd.read_csv(StringIO(cleaned_content), sep=r'\|', engine='python')

    # Drop the markdown separator row (e.g. |---|---|) that follows the header
    df = df.drop(0, axis=0)

    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
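    # The pattern expects two adjacent markdown links per cell (the model link followed
    # by a details link) and captures the link text and URL from the first one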
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)

    # Clean Model column to have only the model link text
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))

    return df


#def calculate_highest_combined_score(data, column):
#    score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#    # Ensure the column exists and has numeric data
#    if column not in data.columns or not pd.api.types.is_numeric_dtype(data[column]):
#        return column, {}
#    scores = data[column].dropna().tolist()
#    models = data['Model'].tolist()
#    top_combinations = {r: [] for r in range(2, 5)}
#    for r in range(2, 5):
#        for combination in combinations(zip(scores, models), r):
#            combined_score = sum(score for score, _ in combination)
#            top_combinations[r].append((combined_score, tuple(model for _, model in combination)))
#        top_combinations[r].sort(key=lambda x: x[0], reverse=True)
#        top_combinations[r] = top_combinations[r][:5]
#    return column, top_combinations

## Modified function to display the results of the highest combined scores using st.dataframe
#def display_highest_combined_scores(data):
#    score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#    with st.spinner('Calculating highest combined scores...'):
#        results = [calculate_highest_combined_score(data, col) for col in score_columns]
#        for column, top_combinations in results:
#            st.subheader(f"Top Combinations for {column}")
#            for r, combos in top_combinations.items():
#                # Prepare data for DataFrame (renamed to avoid shadowing itertools.combinations)
#                rows = [{'Score': score, 'Models': ', '.join(models)} for score, models in combos]
#                df = pd.DataFrame(rows)
#                
#                # Display using st.dataframe
#                st.markdown(f"**Number of Models: {r}**")
#                st.dataframe(df, height=150)  # Adjust height as necessary

                    


# Function to create bar chart for a given category
def create_bar_chart(df, category):
    """Create and display a bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient (using the 'Spectral' color scale)
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Spectral')  # Swap 'Spectral' for any other Plotly color scale
    ))

    # Update layout for better readability; set the height on the figure itself
    # so it scales with the number of rows
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        height=len(df) * 35,
    )

    st.plotly_chart(fig, use_container_width=True)
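# Example usage (illustrative): create_bar_chart(df, 'Average') renders a horizontal
# bar chart of the 'Average' scores, one bar per model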

# Main function to run the Streamlit app
def main():
    # Set page configuration and title
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")

    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")

    # Fetch the leaderboard content (a markdown table built by yall.py)
    content = create_yall()

    # Create tabs for leaderboard and about section
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])

    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Display dataframe
                full_df = convert_markdown_table_to_dataframe(content)

                for col in score_columns:
                    # Coerce score columns to numeric; unparsable values become NaN
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')

                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
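                # Start from an empty frame with the same columns; the tag filters below repopulate it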
                df = pd.DataFrame(columns=full_df.columns)

                # Toggles for filtering by tags
                show_phi = st.checkbox("Phi (2.8B)", value=True)
                show_mistral = st.checkbox("Mistral (7B)", value=True)
                show_other = st.checkbox("Other", value=True)

                # Create a DataFrame based on selected filters
                dfs_to_concat = []

                # Match tags as whole words so the final tag (which has no trailing comma) is also caught
                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains(r'\bphi\b|\bphi-msft\b')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains(r'\bmistral\b')])
                if show_other:
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains(r'\bphi\b|\bphi-msft\b|\bmistral\b')]
                    dfs_to_concat.append(other_df)

                # Concatenate the DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)

                # Add a search bar
                search_query = st.text_input("Search models", "")

                # Filter the DataFrame based on the search query
                if search_query:
                    df = df[df['Model'].str.contains(search_query, case=False)]

                # Add a selectbox for page selection
                items_per_page = 30
                pages = max(calculate_pages(df, items_per_page), 1)  # Always offer at least one page
                page = st.selectbox("Page", list(range(1, pages + 1)))

                # Sort the DataFrame by 'Average' column in descending order
                df = df.sort_values(by='Average', ascending=False)

                # Slice the DataFrame positionally based on the selected page
                start = (page - 1) * items_per_page
                end = start + items_per_page
                df = df.iloc[start:end]
                
                # Display the filtered DataFrame or the entire leaderboard
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❤️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=len(df) * 37,
                )
                selected_models = st.multiselect('Select models to compare', df['Model'].unique())
                comparison_df = df[df['Model'].isin(selected_models)]
                st.dataframe(comparison_df)
                # Offer the full leaderboard as a CSV download
                # (a single st.download_button avoids the double click needed when nesting it inside st.button)
                csv_data = full_df.to_csv(index=False)
                st.download_button(
                    label="Export to CSV",
                    data=csv_data,
                    file_name="leaderboard.csv",
                    key="download-csv",
                    help="Click to download the full leaderboard as a CSV file",
                )

                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])

#                display_highest_combined_scores(full_df)  # Call to display the calculated scores
            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")
    # About tab
    with tab2:
        st.markdown('''
            ### Nous benchmark suite
            Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
            * [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
            * **GPT4All** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
            * [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
            * [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
            ### Reproducibility
            You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). It uploads the results to GitHub as gists. You can find the entire table with links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
            ### Clone this space
            You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
            * Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
            * Create "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens))
            A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
        ''')


# Run the main function if this script is run directly
if __name__ == "__main__":
    main()