import gradio as gr
import pandas as pd
import numpy as np
import easyocr
import cv2
import PIL
import uuid
from PIL import ImageFont, ImageDraw, Image

# Remote Space wrapping the facebook/m2m100_1.2B translation model;
# called below as translator(text, detected_language, target_language)
translator = gr.Interface.load("spaces/Omnibus/Translate-100_link")

 
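# Language names -> language codes accepted by the translation Space (m2m100)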
lang_id = {
    "":"",
    "Afrikaans": "af",
    "Albanian": "sq",
    "Amharic": "am",
    "Arabic": "ar",
    "Armenian": "hy",
    "Asturian": "ast",
    "Azerbaijani": "az",
    "Bashkir": "ba",
    "Belarusian": "be",
    "Bulgarian": "bg",
    "Bengali": "bn",
    "Breton": "br",
    "Bosnian": "bs",
    "Burmese": "my",
    "Catalan": "ca",
    "Cebuano": "ceb",
    "Chinese": "zh",
    "Chinese (simplified)": "zh",
    "Chinese (traditional)": "zh",    
    "Croatian": "hr",
    "Czech": "cs",
    "Danish": "da",
    "Dutch": "nl",
    "English": "en",
    "Estonian": "et",
    "Fulah": "ff",
    "Finnish": "fi",
    "French": "fr",
    "Western Frisian": "fy",
    "Gaelic": "gd",
    "Galician": "gl",
    "Georgian": "ka",
    "German": "de",
    "Greek": "el",
    "Gujarati": "gu",
    "Hausa": "ha",
    "Hebrew": "he",
    "Hindi": "hi",
    "Haitian": "ht",
    "Hungarian": "hu",
    "Irish": "ga",
    "Indonesian": "id",
    "Igbo": "ig",
    "Iloko": "ilo",
    "Icelandic": "is",
    "Italian": "it",
    "Japanese": "ja",
    "Javanese": "jv",
    "Kazakh": "kk",
    "Central Khmer": "km",
    "Kannada": "kn",
    "Korean": "ko",
    "Luxembourgish": "lb",
    "Ganda": "lg",
    "Lingala": "ln",
    "Lao": "lo",
    "Lithuanian": "lt",
    "Latvian": "lv",
    "Malagasy": "mg",
    "Macedonian": "mk",
    "Malayalam": "ml",
    "Mongolian": "mn",
    "Marathi": "mr",
    "Malay": "ms",
    "Nepali": "ne",
    "Norwegian": "no",
    "Northern Sotho": "ns",
    "Occitan": "oc",
    "Oriya": "or",
    "Panjabi": "pa",
    "Persian": "fa",
    "Polish": "pl",
    "Pushto": "ps",
    "Portuguese": "pt",
    "Romanian": "ro",
    "Russian": "ru",
    "Sindhi": "sd",
    "Sinhala": "si",
    "Slovak": "sk",
    "Slovenian": "sl",
    "Spanish": "es",
    "Somali": "so",
    "Serbian": "sr",
    "Serbian (cyrillic)": "sr",
    "Serbian (latin)": "sr",    
    "Swati": "ss",
    "Sundanese": "su",
    "Swedish": "sv",
    "Swahili": "sw",
    "Tamil": "ta",
    "Thai": "th",
    "Tagalog": "tl",
    "Tswana": "tn",
    "Turkish": "tr",
    "Ukrainian": "uk",
    "Urdu": "ur",
    "Uzbek": "uz",
    "Vietnamese": "vi",
    "Welsh": "cy",
    "Wolof": "wo",
    "Xhosa": "xh",
    "Yiddish": "yi",
    "Yoruba": "yo",
    "Zulu": "zu",
}
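# Language names -> EasyOCR language codes used for text detection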
ocr_id = {
    "":"",
    "Afrikaans": "af",
    "Albanian": "sq",
    "Arabic": "ar",
    "Azerbaijani": "az",
    "Belarusian": "be",
    "Bulgarian": "bg",
    "Bengali": "bn",
    "Bosnian": "bs",
    "Chinese (simplified)": "ch_sim",
    "Chinese (traditional)": "ch_tra",
    "Croatian": "hr",
    "Czech": "cs",
    "Danish": "da",
    "Dutch": "nl",
    "English": "en",
    "Estonian": "et",
    "French": "fr",
    "German": "de",
    "Irish": "ga",
    "Hindi": "hi",
    "Hungarian": "hu",
    "Indonesian": "id",
    "Icelandic": "is",
    "Italian": "it",
    "Japanese": "ja",
    "Kannada": "kn",
    "Korean": "ko",
    "Lithuanian": "lt",
    "Latvian": "lv",
    "Mongolian": "mn",
    "Marathi": "mr",
    "Malay": "ms",
    "Nepali": "ne",
    "Norwegian": "no",
    "Occitan": "oc",
    "Polish": "pl",
    "Portuguese": "pt",
    "Romanian": "ro",
    "Russian": "ru",
    "Serbian (cyrillic)": "rs_cyrillic",
    "Serbian (latin)": "rs_latin",
    "Slovak": "sk",
    "Slovenian": "sl",
    "Spanish": "es",
    "Swedish": "sv",
    "Swahili": "sw",
    "Tamil": "ta",
    "Thai": "th",
    "Tagalog": "tl",
    "Turkish": "tr",
    "Ukrainian": "uk",
    "Urdu": "ur",
    "Uzbek": "uz",
    "Vietnamese": "vi",
    "Welsh": "cy",
    "Zulu": "zu",
}


def blur_im(img,bounds,target_lang,trans_lang,ocr_sens,font_fac,t_color):
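    """Blur each detected text region in the image at path `img` and draw its
    translation on top.

    bounds   -- EasyOCR results: (corner points, text, confidence) tuples
    ocr_sens -- minimum confidence for a region to be processed
    font_fac -- fraction of the region height used as the font size
    t_color  -- "Black" or "White" fill for the translated text
    """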
    im = cv2.imread(img)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    # Choose the fill colour for the translated text; default to black so
    # t_fill is always defined even if no colour was selected.
    if t_color == "White":
        t_fill = (255, 255, 255)
    else:
        t_fill = (0, 0, 0)
    # First pass: blur each detected text region above the confidence threshold.
    # Dilating (black text) or eroding (white text) first helps the blur leave a
    # more uniform background behind the drawn translation.
    for bound in bounds:
        if bound[2] >= ocr_sens:
            p0, p1, p2, p3 = bound[0]
            x = int(p0[0])
            y = int(p0[1])
            w = int(p2[0]) - x
            h = int(p2[1]) - y
            kernel = np.ones((3, 3), np.uint8)
            if t_color == "Black":
                im[y:y+h, x:x+w] = cv2.dilate(im[y:y+h, x:x+w], kernel, iterations=3)
            elif t_color == "White":
                im[y:y+h, x:x+w] = cv2.erode(im[y:y+h, x:x+w], kernel, iterations=1)
            im[y:y+h, x:x+w] = cv2.GaussianBlur(im[y:y+h, x:x+w], (51, 51), 0)
    im = Image.fromarray(im)
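    # Second pass: draw the translated text on top of each blurred region.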
    draw = ImageDraw.Draw(im)
    for bound in bounds:
        if bound[2] >= ocr_sens:
            p0, p1, p2, p3 = bound[0]
            x = int(p0[0])
            y = int(p0[1])
            h = int(p2[1]) - y
            text = translator(bound[1], target_lang, trans_lang)
            font_size = int(h * font_fac)
            font = ImageFont.truetype("./fonts/unifont-15.0.01.ttf", font_size)
            draw.text((x, y), text, font=font, fill=t_fill)
    return im
    
def draw_boxes(image, bounds, ocr_sens,width=1):
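    """Outline each OCR detection: blue if its confidence is at least ocr_sens, red otherwise."""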
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        if bound[2]>=(ocr_sens):
            color = "blue"
        else:
            color = "red"
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image

def detect(img, target_lang,trans_lang,ocr_sens,font_fac,t_color):
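    """Run OCR on the uploaded image and return the boxed image, the translated
    image, and the raw OCR results as DataFrames."""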
    # Disabled: optional second detection language (target_lang2)
    # if target_lang2 != None and target_lang2 != "":
    #     lang = [lang_id[target_lang], lang_id[target_lang2]]
    # else:
    lang = [ocr_id[target_lang]]

    img = Image.open(img)
    # JPEG cannot store an alpha channel, so drop it before saving the thumbnail
    img = img.convert("RGB")
    img.thumbnail((1000, 1000), Image.Resampling.LANCZOS)
    path = f"/tmp/{uuid.uuid4()}.jpg"
    img.save(path)
    img1 = np.array(img)
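    # A new EasyOCR reader is created on every request; models for the selected
    # language are loaded (and downloaded on first use) each time.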
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img1)
    im = PIL.Image.open(path)
    im_out=draw_boxes(im, bounds,ocr_sens)
    blr_out=blur_im(path,bounds,target_lang,trans_lang,ocr_sens,font_fac,t_color)
    df = pd.DataFrame(bounds)
    return im_out, blr_out, df, df.iloc[:, 1:]
    
with gr.Blocks() as robot:
    gr.Markdown("""<h1><center>Translate Image to Image</center></h1><h4><center>EasyOCR and facebook/m2m100_1.2B</center></h4><h6><center>If you receive other users' images, please report them via the Community tab</center></h6><h6><center>*Translation may not be accurate</center></h6>""")
    with gr.Accordion(label="Description",open=False):
        with gr.Row():
            gr.Markdown("""<p> Drop your image in the "Image to Translate" Box<br>
                           Select the Language to Detect in the Image<br>
                           Select the Language to Translate to<br>
                           Click the "Go" button </p>""")
            gr.Markdown("""<p> Translation Model: <a href="https://huggingface.co/facebook/m2m100_1.2B">facebook/m2m100_1.2B</a><br>
                               OCR: <a href="https://www.jaided.ai/easyocr">easyocr</a><br>
                            Influenced by:<br>
                            <a href="https://huggingface.co/spaces/venz/AW-05-GR-NLP-Image2Text-Multilingual-OCR">venz/AW-05-GR-NLP-Image2Text-Multilingual-OCR</a><br>
                            <a href="https://huggingface.co/spaces/Iker/Translate-100-languages">Iker/Translate-100-languages</a><br>
                            </p>""")


    with gr.Row():
        with gr.Column():
            im=gr.Image(label="Image to Translate",type="filepath")

        with gr.Column():
            with gr.Group():
                with gr.Row():
                    with gr.Column():
                        target_lang = gr.Dropdown(label="Detect language:", choices=list(ocr_id.keys()),value="English")
                        trans_lang = gr.Dropdown(label="Translate to:", choices=list(lang_id.keys()),value="Chinese")
                    with gr.Column():
                        ocr_sens=gr.Slider(0.1, 1, step=0.05,value=0.25,label="Detect Min Confidence")
                        font_fac=gr.Slider(0.1, 1, step =0.1,value=0.4,label="Font Scale")
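                        # Placeholder control; not wired to any function yet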
                        ocr_space=gr.Slider(1,10, step=1,value=5,label="Future Function")     
                        text_color=gr.Radio(label="Font Color",choices=["Black", "White"], value="Black")

            go_btn=gr.Button("Go")
    with gr.Row():
        with gr.Column():
            out_im=gr.Image()
        with gr.Column():
            trans_im=gr.Image()            

    with gr.Row():
        out_txt=gr.Textbox(lines=8)
        data_f=gr.Dataframe()
                        
    go_btn.click(detect,[im,target_lang,trans_lang,ocr_sens,font_fac,text_color],[out_im,trans_im,out_txt,data_f])
    #go_btn.click(detect,[im,target_lang,target_lang2],[out_im,trans_im,out_txt,data_f])
robot.queue(concurrency_count=10).launch()