import re
import fitz  # PyMuPDF
import language_tool_python
from typing import List, Dict, Any, Tuple
from collections import Counter
import json
import traceback
import io
import tempfile
import os
import gradio as gr

# LanguageTool (used by language_tool_python) runs on Java; point JAVA_HOME at a
# local JDK/JRE installation and adjust this path for your environment.
os.environ['JAVA_HOME'] = '/usr/lib/jvm/java-11-openjdk-amd64'

# ------------------------------
# Analysis Functions
# ------------------------------


def extract_pdf_text(file) -> str:
    """Extract the full text of a PDF (as Markdown) using pymupdf4llm."""
    try:
        print(f"Opening PDF file: {file}")

        # Accept either a filesystem path or a file-like object.
        temp_file_path = None
        if isinstance(file, str):
            print(f"Opening file by path: {file}")
            file_path = file
        else:
            print("Opening file from stream; writing to a temporary file")
            temp_file = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
            temp_file_path = temp_file.name
            temp_file.write(file.read())
            temp_file.close()
            file_path = temp_file_path

        # Get the page count with PyMuPDF for logging purposes.
        with fitz.open(file_path) as doc:
            page_count = len(doc)
        print(f"PDF opened successfully with {page_count} pages")

        # Convert the document to Markdown with pymupdf4llm (imported lazily so
        # the rest of the module still loads if the package is missing).
        import pymupdf4llm
        full_text = pymupdf4llm.to_markdown(file_path)

        # Clean up the temporary file if one was created.
        if temp_file_path:
            os.remove(temp_file_path)

        print(f"Total extracted text length: {len(full_text)} characters "
              f"across {page_count} pages.")
        return full_text

    except Exception as e:
        print(f"Error extracting text from PDF: {str(e)}")
        print(traceback.format_exc())
        return ""


def check_text_presence(full_text: str, search_terms: List[str]) -> Dict[str, bool]:
    """Checks for the presence of required terms in the text."""
    return {term: term.lower() in full_text.lower() for term in search_terms}

def label_authors(full_text: str) -> str:
    """Label the author line with 'Authors:' if it is not already labeled."""
    author_line_regex = r"^(?:.*\n)(.*?)(?:\n\n)"
    match = re.search(author_line_regex, full_text, re.MULTILINE)
    if match:
        authors = match.group(1).strip()
        # Replace only the first occurrence so identical text elsewhere in the
        # document is left untouched.
        return full_text.replace(authors, f"Authors: {authors}", 1)
    return full_text

def check_metadata(full_text: str) -> Dict[str, Any]:
    """Check for metadata elements."""
    return {
        "author_email": bool(re.search(r'\b[\w.-]+?@\w+?\.\w+?\b', full_text)),
        "list_of_authors": bool(re.search(r'Authors?:', full_text, re.IGNORECASE)),
        "keywords_list": bool(re.search(r'Keywords?:', full_text, re.IGNORECASE)),
        "word_count": len(full_text.split()) or "Missing"
    }

def check_disclosures(full_text: str) -> Dict[str, bool]:
    """Check for disclosure statements."""
    # Regular search terms
    search_terms = [
        "conflict of interest statement",
        "ethics statement",
        "funding statement",
        "data access statement"
    ]
    
    # Get results for regular terms
    results = check_text_presence(full_text, search_terms)
    
    # Special check for author contribution(s) statement - either singular or plural form
    has_author_contribution = ("author contribution statement" in full_text.lower() or 
                               "author contributions statement" in full_text.lower())
    
    # Add the author contribution result to our results dictionary
    results["author contribution statement"] = has_author_contribution
    
    return results

def check_figures_and_tables(full_text: str) -> Dict[str, bool]:
    """Check for figures and tables."""
    return {
        "figures_with_citations": bool(re.search(r'Figure \d+.*?citation', full_text, re.IGNORECASE)),
        "figures_legends": bool(re.search(r'Figure \d+.*?legend', full_text, re.IGNORECASE)),
        "tables_legends": bool(re.search(r'Table \d+.*?legend', full_text, re.IGNORECASE))
    }

def check_references(full_text: str) -> Dict[str, Any]:
    """Check for references."""
    return {
        "old_references": bool(re.search(r'\b19[0-9]{2}\b', full_text)),
        "citations_in_abstract": bool(re.search(r'\b(citation|reference)\b', full_text[:1000], re.IGNORECASE)),
        "reference_count": len(re.findall(r'\[.*?\]', full_text)),
        "self_citations": bool(re.search(r'Self-citation', full_text, re.IGNORECASE))
    }

def check_structure(full_text: str) -> Dict[str, bool]:
    """Check document structure."""
    return {
        "imrad_structure": all(section in full_text for section in ["Introduction", "Methods", "Results", "Discussion"]),
        "abstract_structure": "structured abstract" in full_text.lower()
    }

def check_language_issues(full_text: str) -> Dict[str, Any]:
    """Check for language issues using LanguageTool and additional regex patterns."""
    try:
        language_tool = language_tool_python.LanguageTool('en-US')
        matches = language_tool.check(full_text)
        language_tool.close()  # shut down the LanguageTool server once checking is done
        issues = []
        
        # Process LanguageTool matches
        for match in matches:
            # Ignore issues with rule_id 'EN_SPLIT_WORDS_HYPHEN'
            if match.ruleId == "EN_SPLIT_WORDS_HYPHEN":
                continue
                
            issues.append({
                "message": match.message,
                "context": match.context.strip(),
                "suggestions": match.replacements[:3] if match.replacements else [],
                "category": match.category,
                "rule_id": match.ruleId,
                "offset": match.offset,
                "length": match.errorLength,
                "coordinates": [],
                "page": 0
            })
        print(f"Total language issues found: {len(issues)}")
        
        # -----------------------------------
        # Additions: Regex-based Issue Detection
        # -----------------------------------
        
        # Define regex pattern to find words immediately followed by '[' without space
        regex_pattern = r'\b(\w+)\[(\d+)\]'
        regex_matches = list(re.finditer(regex_pattern, full_text))
        print(f"Total regex issues found: {len(regex_matches)}")
        
        # Process regex matches
        for match in regex_matches:
            word = match.group(1)
            number = match.group(2)
            issues.append({
                "message": f"Missing space before '[' in '{word}[{number}]'. Should be '{word} [{number}]'.",
                "context": full_text[max(match.start() - 30, 0):min(match.end() + 30, len(full_text))].strip(),
                "suggestions": [f"{word} [{number}]"],
                "category": "Formatting",
                "rule_id": "SPACE_BEFORE_BRACKET",
                "offset": match.start(),
                "length": match.end() - match.start(),
                "coordinates": [],
                "page": 0
            })
        
        print(f"Total combined issues found: {len(issues)}")
        
        return {
            "total_issues": len(issues),
            "issues": issues
        }
    except Exception as e:
        print(f"Error checking language issues: {e}")
        return {"error": str(e)}

def check_language(full_text: str) -> Dict[str, Any]:
    """Check language quality."""
    return {
        "plain_language": bool(re.search(r'plain language summary', full_text, re.IGNORECASE)),
        "readability_issues": False,  # Placeholder for future implementation
        "language_issues": check_language_issues(full_text)
    }

def check_figure_order(full_text: str) -> Dict[str, Any]:
    """Check if figures are referred to in sequential order."""
    figure_pattern = r'Fig(?:ure)?\.?\s*(\d+)'
    figure_references = re.findall(figure_pattern, full_text, re.IGNORECASE)
    figure_numbers = sorted(set(int(num) for num in figure_references))

    # Note: figure_numbers is a sorted, de-duplicated list, so this check flags
    # gaps in the numbering rather than the order in which figures are cited.
    is_sequential = all(a + 1 == b for a, b in zip(figure_numbers, figure_numbers[1:]))
    
    if figure_numbers:
        expected_figures = set(range(1, max(figure_numbers) + 1))
        missing_figures = list(expected_figures - set(figure_numbers))
    else:
        missing_figures = None

    duplicates = [num for num, count in Counter(figure_references).items() if count > 1]
    # Figures referenced exactly once (kept under the original key name below).
    not_mentioned = list(set(figure_references) - set(duplicates))
    
    return {
        "sequential_order": is_sequential,
        "figure_count": len(figure_numbers),
        "missing_figures": missing_figures,
        "figure_order": figure_numbers,
        "duplicate_references": duplicates,
        "not_mentioned": not_mentioned
    }
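
# Illustrative example (comment only): for text citing
# "Figure 1 ... Figure 1 ... Figure 3", check_figure_order reports
# figure_order=[1, 3], sequential_order=False, missing_figures=[2],
# duplicate_references=['1'] and not_mentioned=['3'] (referenced exactly once).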

def check_reference_order(full_text: str) -> Dict[str, Any]:
    """Check if references in the main body text are in order."""
    reference_pattern = r'\[(\d+)\]'
    references = re.findall(reference_pattern, full_text)
    ref_numbers = [int(ref) for ref in references]
    
    max_ref = 0
    out_of_order = []
    for i, ref in enumerate(ref_numbers):
        if ref > max_ref + 1:
            out_of_order.append((i+1, ref))
        max_ref = max(max_ref, ref)
    
    all_refs = set(range(1, max_ref + 1))
    used_refs = set(ref_numbers)
    missing_refs = list(all_refs - used_refs)
    
    return {
        "max_reference": max_ref,
        "out_of_order": out_of_order,
        "missing_references": missing_refs,
        "is_ordered": len(out_of_order) == 0 and len(missing_refs) == 0
    }
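
# Illustrative example (comment only): for body text containing
# "[1] ... [3] ... [2]", check_reference_order reports max_reference=3,
# out_of_order=[(2, 3)] (the 2nd citation jumps ahead to [3]),
# missing_references=[] and is_ordered=False.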

def highlight_issues_in_pdf(file, language_matches: List[Dict[str, Any]]) -> bytes:
    """
    Highlights language issues in the PDF and returns the annotated PDF as bytes.
    This function maps LanguageTool matches to specific words in the PDF
    and highlights those words.
    """
    try:
        # Open the PDF
        doc = fitz.open(stream=file.read(), filetype="pdf") if not isinstance(file, str) else fitz.open(file)
        # print(f"Opened PDF with {len(doc)} pages.")
        # print(language_matches)
        # Extract words with positions from each page
        word_list = []  # List of tuples: (page_number, word, x0, y0, x1, y1)
        for page_number in range(len(doc)):
            page = doc[page_number]
            print(page.get_text("words"))
            words = page.get_text("words")  # List of tuples: (x0, y0, x1, y1, "word", block_no, line_no, word_no)
            for w in words:
#                 print(w)
                word_text = w[4]
                # **Fix:** Insert a space before '[' to ensure "globally [2]" instead of "globally[2]"
                # if '[' in word_text:
                #     word_text = word_text.replace('[', ' [')
                word_list.append((page_number, word_text, w[0], w[1], w[2], w[3]))
        # print(f"Total words extracted: {len(word_list)}")

        # Concatenate all words to form the full text
        concatenated_text=""
        concatenated_text = " ".join([w[1] for w in word_list])
        
        # print(f"Concatenated text length: {concatenated_text} characters.")

        # Find "Abstract" section and set the processing start point
        abstract_start = concatenated_text.lower().find("abstract")
        abstract_offset = 0 if abstract_start == -1 else abstract_start

        # Find "References" section and exclude from processing
        references_start = concatenated_text.lower().rfind("references")
        references_offset = len(concatenated_text) if references_start == -1 else references_start

        # Iterate over each language issue
        for idx, issue in enumerate(language_matches, start=1):
            offset = issue["offset"]  # offset+line_no-1
            length = issue["length"]

            # Skip issues in the references section
            if offset < abstract_offset or offset >= references_offset:
                continue

            error_text = concatenated_text[offset:offset + length]
            print(f"Issue {idx}: '{error_text}' at offset {offset} with length {length}")

            # Find the words that fall within the error span
            current_pos = 0
            target_words = []
            for word in word_list:
                word_text = word[1]
                word_length = len(word_text) + 1  # +1 for the space

                if current_pos + word_length > offset and current_pos < offset + length:
                    target_words.append(word)
                current_pos += word_length

            if not target_words:
                continue

            # Record the bounding coordinates (first word's top-left to last
            # word's bottom-right) and the 1-based page number on the issue.
            initial_x = target_words[0][2]
            initial_y = target_words[0][3]
            final_x = target_words[-1][4]
            final_y = target_words[-1][5]
            issue["coordinates"] = [initial_x, initial_y, final_x, final_y]
            issue["page"] = target_words[0][0] + 1

            # Add highlight annotations to the target words
            for target in target_words:
                page_num, word_text, x0, y0, x1, y1 = target
                page = doc[page_num]
                # Define a rectangle around the word with some padding
                rect = fitz.Rect(x0 - 1, y0 - 1, x1 + 1, y1 + 1)
                # Add a highlight annotation
                highlight = page.add_highlight_annot(rect)
                highlight.set_colors(stroke=(1, 1, 0))  # Yellow color
                highlight.update()
                # print(f"Highlighted '{word_text}' on page {page_num + 1} at position ({x0}, {y0}, {x1}, {y1})")
            

        # Save annotated PDF to bytes
        byte_stream = io.BytesIO()
        doc.save(byte_stream)
        annotated_pdf_bytes = byte_stream.getvalue()
        doc.close()

        return language_matches, annotated_pdf_bytes
    except Exception as e:
        print(f"Error in highlighting PDF: {e}")
        # Return the original issue list and empty bytes so callers can always
        # unpack a two-tuple.
        return language_matches, b""




# ------------------------------
# Main Analysis Function
# ------------------------------


def analyze_pdf(filepath: str) -> Tuple[Dict[str, Any], bytes]:
    """Analyzes the PDF for language issues and returns results and annotated PDF."""
    try:
        full_text = extract_pdf_text(filepath)
        if not full_text:
            return {"error": "Failed to extract text from PDF."}, None
        
        # Create the results structure
        results = {
            "issues": [],  # Initialize as empty array
            "regex_checks": {
                "metadata": check_metadata(full_text),
                "disclosures": check_disclosures(full_text),
                "figures_and_tables": check_figures_and_tables(full_text),
                "references": check_references(full_text),
                "structure": check_structure(full_text),
                "figure_order": check_figure_order(full_text),
                "reference_order": check_reference_order(full_text)
            }
        }

        # Handle language issues
        language_issues = check_language_issues(full_text)
        if "error" in language_issues:
            return {"error": language_issues["error"]}, None

        issues = language_issues.get("issues", [])
        if issues:
            language_matches, annotated_pdf = highlight_issues_in_pdf(filepath, issues)
            results["issues"] = language_matches  # This is already an array from check_language_issues
            return results, annotated_pdf
        else:
            # Keep issues as empty array if none found
            return results, None

    except Exception as e:
        return {"error": str(e)}, None
# ------------------------------
# Gradio Interface
# ------------------------------

def process_upload(file):
    """
    Process the uploaded PDF file and return analysis results and annotated PDF.
    """
    if file is None:
        return json.dumps({"error": "No file uploaded"}, indent=2), None

    # Write the uploaded bytes to a temporary file so analyze_pdf can work
    # with a filesystem path.
    temp_input = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')
    temp_input.write(file)
    temp_input.close()  # flush the bytes to disk before analysis
    temp_input_path = temp_input.name
    print(temp_input_path)

    # Analyze the PDF
    results, annotated_pdf = analyze_pdf(temp_input_path)
    
    print(results)
    results_json = json.dumps(results, indent=2)

    # Clean up the temporary input file
    os.unlink(temp_input_path)

    # If we have an annotated PDF, save it temporarily
    if annotated_pdf:
        with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
            tmp_file.write(annotated_pdf)
            return results_json, tmp_file.name

    return results_json, None
        

def create_interface():
    with gr.Blocks(title="PDF Analyzer") as interface:
        gr.Markdown("# PDF Analyzer")
        gr.Markdown("Upload a PDF document to analyze its structure, references, language, and more.")
        
        with gr.Row():
            file_input = gr.File(
                label="Upload PDF",
                file_types=[".pdf"],
                type="binary"
            )
        
        with gr.Row():
            analyze_btn = gr.Button("Analyze PDF")
        
        with gr.Row():
            results_output = gr.JSON(
                label="Analysis Results",
                show_label=True
            )
        
        with gr.Row():
            pdf_output = gr.File(
                label="Annotated PDF",
                show_label=True
            )
        
        analyze_btn.click(
            fn=process_upload,
            inputs=[file_input],
            outputs=[results_output, pdf_output]
        )
    
    return interface

if __name__ == "__main__":
    interface = create_interface()
    interface.launch(
        share=False,  # Set to False in production
        # server_name="0.0.0.0",
        server_port=None
    )