import gradio as gr
from huggingface_hub import HfApi, hf_hub_download, Repository
from huggingface_hub.repocard import metadata_load

from PIL import Image, ImageDraw, ImageFont

from datetime import date
import time

import os
import requests
import pandas as pd

from utils import *
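# utils is expected to provide the helper functions used below (e.g. make_clickable_model, pass_emoji)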

api = HfApi()

DATASET_REPO_URL = "https://huggingface.co/datasets/huggingface-projects/Deep-RL-Course-Certification"
CERTIFIED_USERS_FILENAME = "certified_users.csv"
CERTIFIED_USERS_DIR = "certified_users"

HF_TOKEN = os.environ.get("HF_TOKEN")
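# The token must have write access to the dataset repository above so newly certified users can be pushed back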

repo = Repository(
    local_dir=CERTIFIED_USERS_DIR, clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
)

def get_user_models(hf_username, env_tag, lib_tag):
    """
    List the Reinforcement Learning models
    from user given environment and lib
    :param hf_username: User HF username
    :param env_tag: Environment tag
    :param lib_tag: Library tag
    """
    models = api.list_models(author=hf_username, filter=["reinforcement-learning", env_tag, lib_tag])

    user_model_ids = [x.modelId for x in models]
    return user_model_ids


def get_user_sf_models(hf_username, env_tag, lib_tag):
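    """
    List the Sample Factory models of a user for a given environment.
    Sample Factory models are not tagged with the environment name,
    so we read it from each model card's metadata instead.
    :param hf_username: User HF username
    :param env_tag: Environment name to match in the model card
    :param lib_tag: Library tag
    """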
    models_sf = []
    models = api.list_models(author=hf_username, filter=["reinforcement-learning", lib_tag])

    user_model_ids = [x.modelId for x in models]

    for model in user_model_ids:
        meta = get_metadata(model)
        if meta is None or "model-index" not in meta:
            continue
        # The environment name is stored in the model card's model-index metadata
        result = meta["model-index"][0]["results"][0]["dataset"]["name"]
        if result == env_tag:
            models_sf.append(model)
            
    return models_sf


def get_metadata(model_id):
  """
  Get model metadata (contains evaluation data)
  :param model_id
  """
  try:
    readme_path = hf_hub_download(model_id, filename="README.md")
    return metadata_load(readme_path)
  except requests.exceptions.HTTPError:
    # 404 README.md not found
    return None


def parse_metrics_accuracy(meta):
  """
  Get model results and parse it
  :param meta: model metadata
  """
  if "model-index" not in meta:
    return None
  result = meta["model-index"][0]["results"]
  metrics = result[0]["metrics"]
  accuracy = metrics[0]["value"]
  
  return accuracy


def parse_rewards(accuracy):
  """
  Parse mean_reward and std_reward from a result string like "200.50 +/- 20.30"
  :param accuracy: model results
  """
  default_mean_reward = -1000
  default_std_reward = -1000
  if accuracy is not None:
      accuracy = str(accuracy)
      parsed = accuracy.split(' +/- ')
      if len(parsed) > 1:
          mean_reward = float(parsed[0])
          std_reward = float(parsed[1])
      elif len(parsed) == 1:  # only mean reward
          mean_reward = float(parsed[0])
          std_reward = 0.0
      else:
          mean_reward = float(default_mean_reward)
          std_reward = float(default_std_reward)
  else:
      mean_reward = float(default_mean_reward)
      std_reward = float(default_std_reward)

  return mean_reward, std_reward

def calculate_best_result(user_model_ids):
  """
  Calculate the best results of a unit
  best_result = mean_reward - std_reward
  :param user_model_ids: RL models of a user
  """
  best_result = -1000
  best_model_id = ""
  for model in user_model_ids:
    meta = get_metadata(model)
    if meta is None:
      continue
    accuracy = parse_metrics_accuracy(meta)
    mean_reward, std_reward = parse_rewards(accuracy)
    result = mean_reward - std_reward
    if result > best_result:
      best_result = result
      best_model_id = model
      
  return best_result, best_model_id

def check_if_passed(model):
  """
  Check if result >= baseline
  to know if you pass
  :param model: user model
  """
  if model["best_result"] >= model["min_result"]:
    model["passed_"] = True


def certification(hf_username, first_name, last_name):
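  """
  Build the per-unit results table for a user and check which units they passed.
  :param hf_username: User HF username
  :param first_name: User first name (printed on the certificate)
  :param last_name: User last name (printed on the certificate)
  """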
  results_certification = [
      {
          "unit": "Unit 1",
          "env": "LunarLander-v2",
          "library": "stable-baselines3",
          "min_result": 200,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 2",
          "env": "Taxi-v3",
          "library": "q-learning",
          "min_result": 4,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 3",
          "env": "SpaceInvadersNoFrameskip-v4",
          "library": "stable-baselines3",
          "min_result": 200,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 4",
          "env": "CartPole-v1",
          "library": "reinforce",
          "min_result": 350,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 4",
          "env": "Pixelcopter-PLE-v0",
          "library": "reinforce",
          "min_result": 5,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 5",
          "env": "ML-Agents-SnowballTarget",
          "library": "ml-agents",
          "min_result": -100,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 5",
          "env": "ML-Agents-Pyramids",
          "library": "ml-agents",
          "min_result": -100,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 6",
          "env": "PandaReachDense",
          "library": "stable-baselines3",
          "min_result": -3.5,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 7",
          "env": "ML-Agents-SoccerTwos",
          "library": "ml-agents",
          "min_result": -100,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 8 PI",
          "env": "LunarLander-v2",
          "library": "deep-rl-course",
          "min_result": -500,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
      {
          "unit": "Unit 8 PII",
          "env": "doom_health_gathering_supreme",
          "library": "sample-factory",
          "min_result": 5,
          "best_result": 0,
          "best_model_id": "",
          "passed_": False
      },
  ]
  for unit in results_certification:
    if unit["unit"] == "Unit 6":
      # Since Unit 6 can use PandaReachDense-v2 or v3
      user_models = get_user_models(hf_username, "PandaReachDense-v3", unit["library"])
      if len(user_models) == 0:
        print("No PandaReachDense-v3 model found, trying PandaReachDense-v2")
        user_models = get_user_models(hf_username, "PandaReachDense-v2", unit["library"])
    elif unit["unit"] != "Unit 8 PII":
      # Get the user's models for this environment and library
      user_models = get_user_models(hf_username, unit['env'], unit['library'])
    else:
      # Sample Factory ViZDoom models have no environment tag yet,
      # so we match the environment from the model card metadata instead
      user_models = get_user_sf_models(hf_username, unit['env'], unit['library'])
    
    # Calculate the best result and get the best_model_id
    best_result, best_model_id = calculate_best_result(user_models)

    # Save best_result and best_model_id
    unit["best_result"] = best_result
    unit["best_model_id"] = make_clickable_model(best_model_id)

    # Based on best_result do we pass the unit?
    check_if_passed(unit)
    unit["passed"] = pass_emoji(unit["passed_"])
    
  print(results_certification)
 
  df1 = pd.DataFrame(results_certification)

  df = df1[['passed', 'unit', 'env', 'min_result', 'best_result', 'best_model_id']]

  certificate, message, pdf, pass_ = verify_certification(results_certification, hf_username, first_name, last_name)
  print("MESSAGE", message)

  visible = pass_

  return message, pdf, certificate, df, output_row.update(visible=visible)

"""
Verify that the user pass.
If yes:
- Generate the certification
- Send an email
- Print the certification

If no:
- Explain why the user didn't pass yet
"""
def verify_certification(df, hf_username, first_name, last_name):
  # Count how many units were passed
  model_pass_nb = 0
  pass_ = False

  for unit in df:
    if unit["passed_"] is True:
      model_pass_nb += 1

  # There are 11 units; use len(df) so the percentage stays correct if units change
  pass_percentage = (model_pass_nb / len(df)) * 100
  print("pass_percentage", pass_percentage)
  
  if pass_percentage == 100:
    pass_ = True
    # Generate a certificate of excellence
    certificate, pdf = generate_certificate("./certificate_models/certificate-excellence.png", first_name, last_name)

    # Add this user to our database
    add_certified_user(hf_username, first_name, last_name, pass_percentage)
    
    # Add a message
    message = """
    Congratulations, you successfully completed the Hugging Face Deep Reinforcement Learning Course 🎉! \n
    Since you passed 100% of the hands-on units, you get a Certificate of Excellence 🎓. \n
    You can download your certificate below ⬇️ \n
    Don't hesitate to share your certificate image on Twitter and LinkedIn (you can tag me @ThomasSimonini and @huggingface) 🤗
    """

  elif 80 <= pass_percentage < 100:
    pass_ = True
    # Certificate of completion
    certificate, pdf = generate_certificate("./certificate_models/certificate-completion.png", first_name, last_name)

    # Add this user to our database
    add_certified_user(hf_username, first_name, last_name, pass_percentage)

    # Add a message
    message = """
    Congratulations, you successfully completed the Hugging Face Deep Reinforcement Learning Course 🎉! \n
    Since you passed at least 80% of the hands-on units, you get a Certificate of Completion 🎓. \n
    You can download your certificate below ⬇️ \n
    Don't hesitate to share your certificate image on Twitter and LinkedIn (you can tag me @ThomasSimonini and @huggingface) 🤗 \n
    You can still get a Certificate of Excellence if you pass 100% of the hands-on units: check which units you didn't pass and update those models.
    """
  
  else:
    # Not passed yet
    certificate = Image.new("RGB", (100, 100), (255, 255, 255))
    pdf = "./fail.pdf"

    # Add a message
    message = """
    You didn't pass the minimum of 80% of the hands-on units required to get a certificate of completion. But don't be discouraged! \n
    Check below which units you need to do again to get your certificate 💪
    """
  print("return certificate")
  return certificate, message, pdf, pass_
    

def generate_certificate(certificate_model, first_name, last_name):
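    """
    Draw the user's name and today's date on the certificate template
    and export it both as an image and as a PDF.
    :param certificate_model: path to the certificate template image
    :param first_name: User first name
    :param last_name: User last name
    """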
    im = Image.open(certificate_model)
    d = ImageDraw.Draw(im)

    name_font = ImageFont.truetype("Quattrocento-Regular.ttf", 100)
    date_font = ImageFont.truetype("Quattrocento-Regular.ttf", 48)
    
    name = str(first_name) + " " + str(last_name)
    print("NAME", name)
    
    # Debug line name
    #d.line(((200, 740), (1800, 740)), "gray")
    #d.line(((1000, 0), (1000, 1400)), "gray")
    
    # Name
    d.text((1000, 740), name, fill="black", anchor="mm", font=name_font)

    # Debug line date
    #d.line(((1500, 0), (1500, 1400)), "gray")

    # Date of certification
    d.text((1480, 1170), str(date.today()), fill="black", anchor="mm", font=date_font)


    pdf = im.convert('RGB')
    pdf.save('certificate.pdf')

    return im, "./certificate.pdf"



def add_certified_user(hf_username, first_name, last_name, pass_percentage):
  """
  Add the certified user to the database
  """
  print("ADD CERTIFIED USER")
  repo.git_pull()
  history = pd.read_csv(os.path.join(CERTIFIED_USERS_DIR, CERTIFIED_USERS_FILENAME))

  # Check if this hf_username is already in our dataset:
  check = history.loc[history['hf_username'] == hf_username]
  if not check.empty:
    history = history.drop(labels=check.index[0], axis=0)
  
  new_row = pd.DataFrame({'hf_username': hf_username, 'first_name': first_name, 'last_name': last_name, 'pass_percentage': pass_percentage, 'datetime': time.time()}, index=[0])
  history = pd.concat([new_row, history[:]]).reset_index(drop=True)
    
  history.to_csv(os.path.join(CERTIFIED_USERS_DIR, CERTIFIED_USERS_FILENAME), index=False)
  repo.push_to_hub(commit_message="Update certified users list")


with gr.Blocks() as demo:
    gr.Markdown(f"""
    # Get your Deep Reinforcement Learning Certificate 🎓
    The certification process is completely free:

    - To get a *certificate of completion*: you need to **pass 80% of the assignments**.
    - To get a *certificate of excellence*: you need to **pass 100% of the assignments**.

    There are **no deadlines, the course is self-paced**.

    For more information about the certification process, [check this page](https://huggingface.co/deep-rl-course/communication/certification).

    Don't hesitate to share your certificate on Twitter (tag me @ThomasSimonini and @huggingface) and on LinkedIn.
    """)
    
    hf_username = gr.Textbox(placeholder="ThomasSimonini", label="Your Hugging Face Username (case sensitive)")
    first_name = gr.Textbox(placeholder="Jane", label="Your First Name")
    last_name = gr.Textbox(placeholder="Doe", label="Your Last Name")
    #email = gr.Textbox(placeholder="jane.doe@gmail.com", label="Your Email (to receive your certificate)")
    check_progress_button = gr.Button(value="Check if I pass")
    output_text = gr.components.Textbox()
    with gr.Row(visible=True) as output_row:
        output_pdf = gr.File()
        output_img = gr.components.Image(type="pil")
    output_dataframe = gr.components.Dataframe(headers=["Pass?", "Unit", "Environment", "Baseline", "Your best result", "Your best model id"], datatype=["markdown", "markdown", "markdown", "number", "number", "markdown"])
    check_progress_button.click(fn=certification, inputs=[hf_username, first_name, last_name], outputs=[output_text, output_pdf, output_img, output_dataframe, output_row])

    
demo.launch(debug=True)