import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification, pipeline
from numpy import exp
import pandas as pd
from PIL import Image
import urllib.request 
import uuid
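# Session-unique id used to namespace the temp file written by load_url()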
uid = uuid.uuid4()

# Hugging Face model ids for the three detectors
models = [
    "umm-maybe/AI-image-detector",
    "Organika/sdxl-detector",
    "cmckinle/sdxl-flux-detector",
]

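# One image-classification pipeline per detector model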
pipe0 = pipeline("image-classification", model=models[0])
pipe1 = pipeline("image-classification", model=models[1])
pipe2 = pipeline("image-classification", model=models[2])

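# Running list of per-model probability dicts; tot_prob() averages them into the final score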
fin_sum = []

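# Numerically stable softmax over an array of logits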
def softmax(vector):
    e = exp(vector - vector.max())  # for numerical stability
    return e / e.sum()

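# Pipeline-based classifiers: one wrapper per model, each returning an {"AI": p, "Real": p} dict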
def image_classifier0(image):
    labels = ["AI", "Real"]
    outputs = pipe0(image)
    results = {}
    # The pipeline sorts results by score, so map each entry back to its class id
    # (assumes id 0 is the AI/artificial class and id 1 the real/human class,
    # as the original index-based mapping did).
    label2id = pipe0.model.config.label2id
    for result in outputs:
        results[labels[label2id[result["label"]]]] = float(result["score"])
    fin_sum.append(results)
    return results

def image_classifier1(image):
    labels = ["AI", "Real"]
    outputs = pipe1(image)
    results = {}
    # Same class-id mapping as image_classifier0, since the pipeline sorts by score
    label2id = pipe1.model.config.label2id
    for result in outputs:
        results[labels[label2id[result["label"]]]] = float(result["score"])
    fin_sum.append(results)
    return results

def image_classifier2(image):
    labels = ["AI", "Real"]
    outputs = pipe2(image)
    results = {}
    # Same class-id mapping as image_classifier0, since the pipeline sorts by score
    label2id = pipe2.model.config.label2id
    for result in outputs:
        results[labels[label2id[result["label"]]]] = float(result["score"])
    fin_sum.append(results)
    return results

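# Direct-model classifiers: run a forward pass with the raw model and return an HTML summary plus the probability dict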
def aiornot0(image):
    labels = ["AI", "Real"]
    mod = models[0]
    feature_extractor0 = AutoFeatureExtractor.from_pretrained(mod)
    model0 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor0(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model0(**inputs)
        logits = outputs.logits
        probability = softmax(logits.numpy())  # softmax over the raw logits
        px = pd.DataFrame(probability)
    prediction = logits.argmax(-1).item()
    label = labels[prediction]

    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {float(px[1][0])}<br>
    AI: {float(px[0][0])}</h3>"""

    results = {
        "Real": float(px[1][0]),
        "AI": float(px[0][0])
    }
    fin_sum.append(results)
    return html_out, results

def aiornot1(image):
    labels = ["AI", "Real"]
    mod = models[1]
    feature_extractor1 = AutoFeatureExtractor.from_pretrained(mod)
    model1 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor1(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model1(**inputs)
        logits = outputs.logits
        probability = softmax(logits.numpy())  # softmax over the raw logits
        px = pd.DataFrame(probability)
    prediction = logits.argmax(-1).item()
    label = labels[prediction]

    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {float(px[1][0])}<br>
    AI: {float(px[0][0])}</h3>"""

    results = {
        "Real": float(px[1][0]),
        "AI": float(px[0][0])
    }
    fin_sum.append(results)
    return html_out, results

def aiornot2(image):
    labels = ["AI", "Real"]
    mod = models[2]
    feature_extractor2 = AutoFeatureExtractor.from_pretrained(mod)
    model2 = AutoModelForImageClassification.from_pretrained(mod)
    inputs = feature_extractor2(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model2(**inputs)
        logits = outputs.logits
        probability = softmax(logits.numpy())  # softmax over the raw logits
        px = pd.DataFrame(probability)
    prediction = logits.argmax(-1).item()
    label = labels[prediction]

    html_out = f"""
    <h1>This image is likely: {label}</h1><br><h3>
    Probabilities:<br>
    Real: {float(px[1][0])}<br>
    AI: {float(px[0][0])}</h3>"""

    results = {
        "Real": float(px[1][0]),
        "AI": float(px[0][0])
    }
    fin_sum.append(results)
    return html_out, results

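# Download an image from a URL to a uid-stamped temp file and return it as a PIL image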
def load_url(url):
    try:
        urllib.request.urlretrieve(url, f"{uid}tmp_im.png")
        image = Image.open(f"{uid}tmp_im.png")
        mes = "Image Loaded"
    except Exception as e:
        image = None
        mes = f"Image not Found<br>Error: {e}"
    return image, mes

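# Average the accumulated "Real" probabilities across all model runs so far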
def tot_prob():
    try:
        fin_out = sum(result["Real"] for result in fin_sum) / len(fin_sum)
        fin_sub = 1 - fin_out
        # gr.Label expects numeric confidences, so return floats rather than strings
        out = {
            "Real": fin_out,
            "AI": fin_sub
        }
        return out
    except Exception as e:
        print(e)
        return None

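# Reset the accumulated results before a new detection run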
def fin_clear():
    fin_sum.clear()
    return None

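# Round-trips an image through a uniquely named temp file; defined for reuse but not wired to any event below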
def upd(image):
    rand_im = uuid.uuid4()
    image.save(f"{rand_im}-vid_tmp_proc.png")
    out = Image.open(f"{rand_im}-vid_tmp_proc.png")
    return out

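# Gradio UI: image/URL inputs, one result panel per model, and an averaged "Final Probability" label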
with gr.Blocks() as app:
    gr.Markdown("""<center><h1>AI Image Detector</h1><h4>(Test Demo - accuracy varies by model)</h4></center>""")
    with gr.Column():
        inp = gr.Image(type='pil')
        in_url = gr.Textbox(label="Image URL")
        with gr.Row():
            load_btn = gr.Button("Load URL")
            btn = gr.Button("Detect AI")
        mes = gr.HTML("""""")

    with gr.Group():
        with gr.Row():
            fin = gr.Label(label="Final Probability", visible=False)
        with gr.Row():
            # One panel per model, linking to its Hugging Face model card
            with gr.Box():
                lab0 = gr.HTML(f"""<b>Testing on Original Model: <a href='https://huggingface.co/{models[0]}'>{models[0]}</a></b>""")
                nun0 = gr.HTML("""""")
            with gr.Box():
                lab1 = gr.HTML(f"""<b>Testing on SDXL Fine Tuned Model: <a href='https://huggingface.co/{models[1]}'>{models[1]}</a></b>""")
                nun1 = gr.HTML("""""")
            with gr.Box():
                lab2 = gr.HTML(f"""<b>Testing on SDXL and Flux Fine Tuned Model: <a href='https://huggingface.co/{models[2]}'>{models[2]}</a></b>""")
                nun2 = gr.HTML("""""")
        with gr.Row():
            with gr.Box():
                n_out0 = gr.Label(label="Output")
                outp0 = gr.HTML("""""")
            with gr.Box():
                n_out1 = gr.Label(label="Output")
                outp1 = gr.HTML("""""")
            with gr.Box():
                n_out2 = gr.Label(label="Output")
                outp2 = gr.HTML("""""")

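    # Event wiring: "Detect AI" clears the running totals, then fires both the direct-model and
    # pipeline paths for every detector, re-averaging the final probability after each result;
    # "Load URL" fetches the image into the input component.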
    btn.click(fin_clear, None, fin, show_progress=False)
    load_btn.click(load_url, in_url, [inp, mes])

    btn.click(aiornot0, [inp], [outp0, n_out0]).then(tot_prob, None, fin, show_progress=False)
    btn.click(aiornot1, [inp], [outp1, n_out1]).then(tot_prob, None, fin, show_progress=False)
    btn.click(aiornot2, [inp], [outp2, n_out2]).then(tot_prob, None, fin, show_progress=False)

    btn.click(image_classifier0, [inp], [n_out0]).then(tot_prob, None, fin, show_progress=False)
    btn.click(image_classifier1, [inp], [n_out1]).then(tot_prob, None, fin, show_progress=False)
    btn.click(image_classifier2, [inp], [n_out2]).then(tot_prob, None, fin, show_progress=False)

app.launch(show_api=False, max_threads=24)