import streamlit as st
from llama_cpp import Llama

# Initialize the Llama model
@st.cache_resource  # Cache the model to avoid reloading it on every run
def load_llama_model():
    return Llama.from_pretrained(
        repo_id="Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF",
        filename="Lexi-Llama-3-8B-Uncensored_F16.gguf",
    )
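
# Note: the F16 GGUF file is large (roughly 16 GB for an 8B model), so the
# first call downloads it from the Hugging Face Hub (this requires the
# huggingface_hub package) and loading it needs substantial RAM.
# from_pretrained also forwards keyword arguments to the Llama constructor,
# so a longer context window can be requested if needed, for example
# (assumed value, tune to your hardware):
#     Llama.from_pretrained(repo_id=..., filename=..., n_ctx=4096)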

# Title and description
st.title("AI Coin Error Detector")
st.write("This AI uses the Llama model to analyze coins for potential errors.")

# Load the model
model = load_llama_model()

# User input: Upload an image of a coin
uploaded_file = st.file_uploader("Upload a coin image (optional):", type=["jpg", "jpeg", "png"])
coin_description = st.text_area("Describe the coin (e.g., year, denomination, visible features):")
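
# Preview the uploaded image so the user can confirm the right file was chosen.
# (A minimal addition using Streamlit's st.image; note that the text-only model
# below never sees the image itself.)
if uploaded_file:
    st.image(uploaded_file, caption="Uploaded coin image")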

if st.button("Analyze"):
    if not coin_description and not uploaded_file:
        st.error("Please upload an image or provide a description of the coin.")
    else:
        # Build the prompt from the user's input
        prompt = "Analyze the following coin for potential minting errors:\n"
        prompt += f"Description: {coin_description}\n"
        if uploaded_file:
            # The GGUF model loaded above is text-only, so the image cannot be
            # passed into the prompt; it is displayed purely for reference.
            st.info("The uploaded image is shown for reference only; the analysis is based on your text description.")

        # Run the Llama model. Local inference can take a while (especially on
        # CPU), so a spinner gives the user feedback while it runs.
        with st.spinner("Analyzing the coin description..."):
            response = model.create_chat_completion(
                messages=[{"role": "user", "content": prompt}]
            )
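        # create_chat_completion also accepts sampling parameters such as
        # max_tokens and temperature (the values below are assumptions, not
        # part of the original script), e.g.:
        #     model.create_chat_completion(messages=..., max_tokens=512, temperature=0.7)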

        # Display the result
        st.write("### AI Response:")
        st.write(response["choices"][0]["message"]["content"])