import gradio as gr
import pandas as pd
from transformers import pipeline
from PIL import Image
import requests
from io import BytesIO

# ---- SETUP ----
# Load dataset (make sure 'gldv2_info.csv' is in the project root)
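# The CSV is expected to provide at least 'name' and 'url' columns (both are used below).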
df = pd.read_csv("gldv2_info.csv")

# Load a free mythology/historical text generator from Hugging Face
try:
    story_pipe = pipeline("text-generation", model="Samurai719214/gptneo-mythology-storyteller")
except Exception:
    story_pipe = pipeline("text-generation", model="mahing/historical-narrative-generator")

# Optional: set up text-to-image only if 'diffusers' is available and desired
try:
    from diffusers import StableDiffusionPipeline
    import torch
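    # Note: the SDXL base weights are several GB; a GPU is strongly recommended
    # for reasonable generation times. Falls back to CPU (float32) otherwise.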
    sd_pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
    )
    sd_pipe = sd_pipe.to("cuda" if torch.cuda.is_available() else "cpu")
except Exception:
    sd_pipe = None

# ---- APP FUNCTION ----
def landmark_explorer(site_name):
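    """Look up a landmark by name in the dataset, fetch its images,
    and generate a mythological/historical story about it.

    Returns a (story, images) tuple; images is None if nothing could be
    fetched or generated.
    """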
    site_name_clean = site_name.strip().lower()
    matches = df[df['name'].str.strip().str.lower() == site_name_clean]
    images = []
    # Gather all valid images
    for url in matches['url']:
        try:
            resp = requests.get(url, timeout=6)
            resp.raise_for_status()
            img = Image.open(BytesIO(resp.content)).convert("RGB")
            images.append(img)
        except Exception:
            continue

    # Generate historical/myth story
    story_prompt = (
        f"Tell a mythological and historical story about the site: {site_name} in Egypt. "
        "Include both legend and real archaeological facts where possible."
    )
    try:
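        # max_length counts prompt + generated tokens; raise it for longer stories.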
        out = story_pipe(story_prompt, max_length=380, do_sample=True)
        story = out[0]['generated_text']
    except Exception:
        story = "Unable to generate story for this landmark at the moment."

    # If no images, generate one with Stable Diffusion (optional)
    if not images and sd_pipe is not None:
        try:
            images = [sd_pipe(f"{site_name} in ancient Egypt, detailed, realistic, landscape").images[0]]
        except Exception:
            pass

    if not images:
        return story, None
    return story, images

# ---- GRADIO UI ----
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # 🏛️ Egyptian Landmark Explorer
        Enter the name of an ancient Egyptian landmark (from the dataset) to see a mythological and historical story, plus images!
        """
    )
    name_input = gr.Textbox(label="Enter Landmark Name (e.g., Great_Pyramid_of_Giza)")
    story_output = gr.Textbox(label="Generated Story")
    gallery_output = gr.Gallery(label="Images", columns=3)
    run_btn = gr.Button("Explore")

    run_btn.click(
        landmark_explorer,
        inputs=name_input,
        outputs=[story_output, gallery_output]
    )

    gr.Markdown(
        "Sample landmark names: Great_Pyramid_of_Giza, Karnak, Temple_of_Edfu, Bab_al-Nasr_(Cairo), etc."
    )

if __name__ == "__main__":
    demo.launch()