import multiprocessing
import pathlib
import re
import time
import traceback

import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, Tag
from markdownify import MarkdownConverter, chomp
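
# Converts Wikipedia HTML dump records (*.ndjson, one JSON article per line) into
# markdown-flavoured JSONL shards, one output file per worker process.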

CONCURRENT = 64


class WikiConverter(MarkdownConverter):
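    """markdownify converter tuned for Wikipedia HTML: links are reduced to their
    text, images to their alt text, and ordered lists honour their start value."""
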
    def convert_a(self, el, text, convert_as_inline):
        prefix, suffix, text = chomp(text)
        if not text:
            return ""
        return "%s%s%s" % (prefix, text, suffix)

    integer_rgx = re.compile("^[0-9]*$")

    @staticmethod
    def is_intable(string: str) -> bool:
        # Accept only non-empty ASCII-digit strings (str.isdigit also matches other
        # Unicode digits, so the regex narrows it down) and always return a bool.
        if not string or not string.isdigit():
            return False
        return bool(WikiConverter.integer_rgx.match(string))

    def convert_img(self, el, text, convert_as_inline):
        # Images are reduced to their alt text regardless of inline context.
        return el.attrs.get("alt", None) or ""

    def convert_li(self, el, text, convert_as_inline):
        parent = el.parent
        if parent is not None and parent.name == "ol":
            start = parent.get("start")
            if start and WikiConverter.is_intable(start.strip()):
                start = int(start.strip())
            else:
                start = 1
            bullet = "%s." % (start + parent.index(el))
        else:
            depth = -1
            while el:
                if el.name == "ul":
                    depth += 1
                el = el.parent
            bullets = self.options["bullets"]
            bullet = bullets[depth % len(bullets)]
        return "%s %s\n" % (bullet, (text or "").strip())


class MultilangWikipediaProcessor:
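    """Converts article records from Wikipedia HTML dumps (ndjson) into markdown text
    plus extracted side content: infobox HTML, figures, and maintenance-box classes."""
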
    def __init__(self) -> None:
        self.md = WikiConverter()

    def is_stub(self, soup: BeautifulSoup):
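        """Heuristic stub check: look for a "stub" marker on metadata plainlinks boxes."""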
        for plainlinks in soup.select(".metadata.plainlinks"):
            if "stub" in plainlinks.get("id", "") or "stub" in plainlinks.get(
                "class", []
            ):
                return True
        return False

    def rital_ambox(self, input_soup: BeautifulSoup):
        # Record the classes of "ambox" maintenance tables, then strip them from the
        # soup (mirrors rital_ombox below; the ambox selector is assumed).
        ambox_classes = []
        selects = input_soup.select('table[class~="ambox"]')
        for ambox in selects:
            if ambox is not None:
                ambox_classes.append(ambox.get("class"))
        for ambox in selects:
            ambox.decompose()
        return input_soup, ambox_classes

    def rital_ombox(self, input_soup: BeautifulSoup):
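        """Record the classes of "ombox" maintenance tables, then remove them."""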
        ombox_classes = []
        selects = input_soup.select('table[class~="ombox"]')
        for ombox in selects:
            if ombox is not None:
                ombox_classes.append(ombox["class"])
        for ombox in selects:
            ombox.decompose()
        return input_soup, ombox_classes

    def table_filtration(self, input_soup: BeautifulSoup, title):
        # Drop near-empty tables: more <td> cells than non-space text characters,
        # with fewer than 50 characters of text overall.
        for table in input_soup.select("table"):
            tds = len(table.find_all("td"))
            textsize = len(table.get_text().replace(" ", ""))
            if tds >= textsize and textsize < 50:
                print(table.get_text().replace(" ", ""))
                print("Removing table from", title, ". Cell count exceeds content.")
                table.decompose()
        return input_soup

    all_selectors = [
        "style",  # Remove inline styling
        "sup.reference",  # Reference superscripts that survive in the dump HTML
        "table.nomobile",  # "nomobile" tables that survive in the dump HTML
    ]

    def process_infobox(self, infobox: Tag):
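        """Keep the infobox as its raw HTML string; no further parsing is done here."""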
        return str(infobox)

    def process_figures(self, figure: Tag):
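        """Extract a figure's link target and caption.

        Returns a dict such as {"file_url": "./File:Example.jpg", "caption": "..."}
        (values illustrative), or None when neither field is present.
        """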
        figure_data = {}
        fig_a = figure.find("a")
        fig_cap = figure.find("figcaption")
        if fig_a:
            figure_data["file_url"] = fig_a.get("href", None)
        else:
            figure_data["file_url"] = None
        if fig_cap:
            figure_data["caption"] = fig_cap.get_text()
        else:
            figure_data["caption"] = None
        if figure_data["caption"] is None and figure_data["file_url"] is None:
            return None
        return figure_data

    def convert_soup(self, input_soup: BeautifulSoup):
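        """Clean the article soup and pull out side content.

        Returns a tuple of (cleaned soup, infobox HTML strings, figure dicts, title tag).
        """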
        # Unwrap interface-only wrappers, then drop unwanted nodes outright.
        for i in input_soup.select('[data-mw^="interface"]'):
            i.unwrap()
        for i in input_soup.select(", ".join(self.all_selectors)):
            i.decompose()
        title = input_soup.select_one("title").extract()
        # titletext = title.get_text()

        for i in input_soup.select(".mw-collapsible"):
            hidden = i.select_one("div.hidden-content")
            if hidden:
                # Expose collapsed content
                hidden["class"].remove("hidden-content")
        for i in input_soup.select("[data-mw]"):
            i["data-mw"] = ""

        ifbs = [i.extract() for i in input_soup.select("table.infobox")]
        ifbs += [i.extract() for i in input_soup.select("table.sidebar.vcard.hlist")]
        ifbs += [i.extract() for i in input_soup.select("table.infobox.vcard")]
        ifbs = [self.process_infobox(ifb) for ifb in ifbs]
        figures = [
            self.process_figures(fig.extract())
            for fig in input_soup.select('figure[typeof^="mw:File/Thumb"]')
        ]

        return input_soup, ifbs, figures, title

    def convert(self, wiki_data: bytes):
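        """Convert one raw dump line into an orjson-encoded record.

        Returns the serialized bytes, or None when wikitext is missing or
        conversion fails.
        """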
        data = orjson.loads(wiki_data.rstrip(b"\n"))
        try:
            templates = [
                ":".join(template["name"].split(":")[1:])
                for template in data.get("templates", [])
            ]
            categories = [
                ":".join(category["name"].split(":")[1:])
                for category in data.get("categories", [])
            ]
            if not data["article_body"].get("wikitext"):
                return None
            soup = BeautifulSoup(data["article_body"]["html"], "lxml")
            is_stub = self.is_stub(soup)
            soup, infobox, figures, title = self.convert_soup(soup)

            # soup, issues, issue_selectors = self.remove_templates(soup, all_templates)
            soup, amboxes = self.rital_ambox(soup)
            soup, omboxes = self.rital_ombox(soup)
            soup = self.table_filtration(soup, title)
            text = (
                self.md.convert_soup(soup)
                .strip()
                .replace("\n\n", "\n")
                .replace("\n\n\n", "\n\n")
            )
            return orjson.dumps(
                {
                    "id": data["identifier"],
                    "title": data["name"],
                    "url": data["url"],
                    "stub": is_stub,
                    "template": templates,
                    "category": categories,
                    "license": [lic["name"] for lic in data["license"]],
                    "text": text,
                    "wikitext": data["article_body"].get("wikitext"),
                    "lang": data["in_language"]["identifier"],
                    "abstract": data.get("abstract", ""),
                    "boxes_filters": amboxes + omboxes,
                    "infobox_html": infobox,
                    "figures_dict": figures,
                }
            )
        except Exception as e:
            print(f"Exception at Soup Conversion: {e} [{data['name']}]")
            return None


# Shared work queue. Worker processes inherit it when the Pool forks, so this
# relies on the fork start method (the Linux default); under "spawn" each worker
# would see its own empty queue.
queue = multiprocessing.Queue(maxsize=1024 * 1024)


def worker(worker_idx: int, prefix_path: pathlib.Path):
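    """Consume raw dump lines from the shared queue and append converted records to
    this worker's JSONL shard; a None item signals shutdown."""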
    if prefix_path.parent:
        prefix_path.parent.mkdir(exist_ok=True, parents=True)
    processor = MultilangWikipediaProcessor()
    prefix_path = prefix_path.with_name(
        f"{prefix_path.name}-{str(worker_idx).zfill(2)}.jsonl"
    )
    with open(prefix_path, "wb") as f:
        while True:
            data = queue.get()
            if data is None:
                break
            # print(data[:16])
            parsed = processor.convert(data)
            if parsed:
                f.write(parsed)
                f.write(b"\n")


app = typer.Typer()


def err_cb(err: Exception):
    # Print the full traceback rather than just its first line.
    print("".join(traceback.format_exception(err)))


@app.command()
def main(folder: pathlib.Path, prefix_path: pathlib.Path, processes: int = 64):
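    """Feed every *.ndjson line under `folder` to the worker pool, then wait for the
    workers to drain the queue.

    Example invocation (script name and paths are illustrative):
        python convert_wiki.py ./dumps ./out/wiki --processes 64
    """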
    # orjson.loads()
    with multiprocessing.Pool(processes=processes) as pool:
        workers = [
            pool.apply_async(
                worker, args=(worker_idx, prefix_path), error_callback=err_cb
            )
            for worker_idx in range(processes)
        ]
        for file in pathlib.Path(folder).glob("*.ndjson"):
            with open(file, "rb") as fp:
                pbar = tqdm.tqdm(desc=f"{file.name}")
                for line in fp:
                    # print(line[:16])
                    queue.put(line)
                    pbar.update(1)
                print("processed", file.name)
                pbar.close()
        for _ in range(processes):
            queue.put(None)
        while not queue.empty():
            print("Waiting for empty queue")
            time.sleep(5)
        print("Queue is empty. Waiting for threads to finish.")
        patience = 120
        # processor_thr = 0.05
        while True:
            workers_done = sum(1 for running_worker in workers if running_worker.ready())
            if workers_done == processes:
                break
            if patience <= 0:
                # Out of patience: kill whatever is still running.
                pool.terminate()
                break
            print(f"Waiting for workers to finish ({workers_done}/{processes} done).")
            time.sleep(10)
            patience -= 1

        # pool.join()


if __name__ == "__main__":
    app()