Spaces:
Sleeping
Sleeping
MUHAMMAD YOUSAF RANA
committed on
Commit
·
1626f55
1
Parent(s):
43c9253
initial files uploaded
Browse files- .gitattributes +35 -35
- .idea/.gitignore +8 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +7 -0
- .idea/modules.xml +8 -0
- .idea/travelling-agent.iml +8 -0
- .idea/vcs.xml +6 -0
- Dockerfile +32 -0
- README.md +12 -12
- gemini_utils.py +120 -0
- image_searcher.py +117 -0
- main.py +233 -0
- models.py +39 -0
- rag_utils.py +61 -0
- requirements.txt +12 -0
.gitattributes
CHANGED
@@ -1,35 +1,35 @@
|
|
1 |
-
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
-
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
-
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
-
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
-
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
.idea/.gitignore
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Default ignored files
|
2 |
+
/shelf/
|
3 |
+
/workspace.xml
|
4 |
+
# Editor-based HTTP Client requests
|
5 |
+
/httpRequests/
|
6 |
+
# Datasource local storage ignored files
|
7 |
+
/dataSources/
|
8 |
+
/dataSources.local.xml
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<settings>
|
3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
4 |
+
<version value="1.0" />
|
5 |
+
</settings>
|
6 |
+
</component>
|
.idea/misc.xml
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="GithubDefaultAccount">
|
4 |
+
<option name="defaultAccountId" value="34b99f66-1f2e-4ee6-b01c-03df7216dada" />
|
5 |
+
</component>
|
6 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="imagebind" project-jdk-type="Python SDK" />
|
7 |
+
</project>
|
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="ProjectModuleManager">
|
4 |
+
<modules>
|
5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/travelling-agent.iml" filepath="$PROJECT_DIR$/.idea/travelling-agent.iml" />
|
6 |
+
</modules>
|
7 |
+
</component>
|
8 |
+
</project>
|
.idea/travelling-agent.iml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<module type="PYTHON_MODULE" version="4">
|
3 |
+
<component name="NewModuleRootManager">
|
4 |
+
<content url="file://$MODULE_DIR$" />
|
5 |
+
<orderEntry type="jdk" jdkName="imagebind" jdkType="Python SDK" />
|
6 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
7 |
+
</component>
|
8 |
+
</module>
|
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="VcsDirectoryMappings">
|
4 |
+
<mapping directory="" vcs="Git" />
|
5 |
+
</component>
|
6 |
+
</project>
|
Dockerfile
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use an official Python runtime as a parent image
|
2 |
+
FROM python:3.9-slim
|
3 |
+
|
4 |
+
# Set the working directory in the container
|
5 |
+
WORKDIR /app
|
6 |
+
|
7 |
+
# Copy the requirements file into the container at /app
|
8 |
+
COPY requirements.txt .
|
9 |
+
|
10 |
+
# Install any needed packages specified in requirements.txt
|
11 |
+
# --no-cache-dir: Don't store the downloaded packages, keeping the image size smaller.
|
12 |
+
# --upgrade pip: Ensure you have the latest pip.
|
13 |
+
RUN pip install --no-cache-dir --upgrade pip && \
|
14 |
+
pip install --no-cache-dir -r requirements.txt
|
15 |
+
|
16 |
+
# Copy the rest of the application code into the container at /app
|
17 |
+
COPY . .
|
18 |
+
|
19 |
+
# Make port 8000 available to the world outside this container
|
20 |
+
# Hugging Face Spaces typically expect the app on port 7860, but FastAPI defaults to 8000.
|
21 |
+
# We'll run on 8000 and let HF handle mapping if needed, or adjust the CMD.
|
22 |
+
# Let's use 7860 directly as it's common for HF.
|
23 |
+
EXPOSE 7860
|
24 |
+
|
25 |
+
# Define environment variables (optional, can be set in Hugging Face secrets)
|
26 |
+
# ENV GOOGLE_API_KEY="your_key_here" # It's better to use HF Secrets
|
27 |
+
# ENV PEXELS_API_KEY="your_key_here" # It's better to use HF Secrets
|
28 |
+
|
29 |
+
# Run main.py when the container launches
|
30 |
+
# Use 0.0.0.0 to make it accessible from outside the container.
|
31 |
+
# Use port 7860 as often expected by Hugging Face Spaces.
|
32 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
@@ -1,12 +1,12 @@
|
|
1 |
-
---
|
2 |
-
title: Travelling Agent
|
3 |
-
emoji: ⚡
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: gray
|
6 |
-
sdk: docker
|
7 |
-
pinned: false
|
8 |
-
license: mit
|
9 |
-
short_description: agent that recommend the places to travel
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
1 |
+
---
|
2 |
+
title: Travelling Agent
|
3 |
+
emoji: ⚡
|
4 |
+
colorFrom: red
|
5 |
+
colorTo: gray
|
6 |
+
sdk: docker
|
7 |
+
pinned: false
|
8 |
+
license: mit
|
9 |
+
short_description: agent that recommend the places to travel
|
10 |
+
---
|
11 |
+
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
gemini_utils.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# gemini_utils.py

import os
import time
import backoff
import google.generativeai as genai
from typing import Dict, List

# SECURITY FIX: the Google API key was previously hard-coded here (and is
# therefore compromised — it must be rotated). Load it from the environment
# (e.g. Hugging Face Spaces secrets) as the original comment intended.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
if not GOOGLE_API_KEY:
    print("Warning: GOOGLE_API_KEY is not set; Gemini calls will fail.")

genai.configure(api_key=GOOGLE_API_KEY)

# Initialize the model once at import time so every request reuses it.
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
|
16 |
+
|
17 |
+
@backoff.on_exception(
    backoff.expo,
    Exception,
    max_tries=3,
    # Retry only transient failures: give up immediately unless the error
    # message mentions a rate limit or a timeout.
    giveup=lambda e: "rate limit" not in str(e).lower() and "timeout" not in str(e).lower(),
    max_value=20
)
def generate_with_gemini(prompt: str, max_tokens: int = 2048, temperature: float = 0.7) -> str:
    """
    Generate a response from the Gemini model using the given prompt.

    Args:
        prompt: Full text prompt to send to the model.
        max_tokens: Cap on the number of output tokens.
        temperature: Sampling temperature (higher = more varied output).

    Returns:
        The generated text from the model.

    Raises:
        Exception: Any API error is logged and re-raised; rate-limit and
        timeout errors are retried up to 3 times with exponential backoff.
    """
    try:
        response = model.generate_content(
            prompt,
            generation_config={
                "max_output_tokens": max_tokens,
                "temperature": temperature
            }
        )
        return response.text
    except Exception as e:
        print(f"Gemini API Error: {str(e)}")
        # FIX: bare `raise` preserves the original traceback (was `raise e`).
        raise
|
41 |
+
|
42 |
+
def create_travel_plan_prompt(travel_data: Dict) -> str:
    """
    Build the Gemini prompt for a full travel itinerary from the
    user-supplied trip parameters.

    Args:
        travel_data: Trip fields (destination, num_days, budget, gender,
            preferences, ...) as produced by ``TravelDataInput.model_dump()``.

    Returns:
        The complete prompt string to send to the model.
    """
    fallback_dest = "a suitable destination based on additional preferences"
    destination = travel_data.get("destination") or fallback_dest
    location_type = travel_data.get("location_type")
    province = travel_data.get("province")
    starting_location = travel_data.get("starting_location") or "Pakistan"
    gender = travel_data.get("gender")
    num_people = travel_data.get("num_people")
    has_children = travel_data.get("has_children")
    activity_type = travel_data.get("activity_type")
    travel_group = travel_data.get("travel_group")
    preferences = ", ".join(travel_data.get("preferences", [])) or "None"
    budget = travel_data.get("budget", 0)
    num_days = travel_data.get("num_days")
    additional_preferences = travel_data.get("additional_preferences", "") or "None"

    # Pre-compute the variable clauses so the template below stays readable.
    budget_clause = (
        f"They have a budget of {budget} PKR" if budget > 0
        else "No specific budget is provided"
    )
    children_clause = "with children" if has_children else "without children"
    destination_clause = (
        f"They want to visit {destination}." if destination != fallback_dest
        else "Suggest a suitable destination based on their preferences."
    )

    return f"""You are a travel expert specializing in Pakistan. Create a detailed {num_days}-day travel itinerary for {num_people} people, including {gender.lower()} travelers, who are {travel_group.lower()}, {children_clause}, starting from {starting_location}. {destination_clause} The destination is a {location_type} in {province}. They prefer {activity_type.lower()} activities and are interested in {preferences}. {budget_clause}. Additional requests: {additional_preferences}

INCLUDE ALL THE FOLLOWING SECTIONS in your itinerary, using simple and clear language:

1. **Itinerary Overview:**
   - Destination: [Destination Name]
   - Brief summary of the trip
   - Key highlights and attractions
   - Best time to visit the destination
   - If no destination is specified, explain why the suggested destination was chosen

2. **Daily Itinerary:**
   - For each day (Day 1 to Day {num_days}), provide:
     - Morning, afternoon, and evening activities
     - Meal suggestions (local cuisine where possible)
     - Accommodation recommendations (hotels, guesthouses, etc.)

3. **Transportation:**
   - How to travel from {starting_location} to the destination (e.g., flights, buses)
   - Local transportation options within the destination

4. **Safety and Health:**
   - Safety precautions, especially for children if applicable
   - Health considerations or recommended vaccinations

5. **Cultural Etiquette:**
   - Important cultural norms to respect

6. **Estimated Costs:**
   - Detailed cost breakdown for activities, meals, accommodations, and transportation
   - Ensure the itinerary fits within the budget if provided, or provide approximate costs

7. **Travel Tips:**
   - Practical tips for a smooth trip (e.g., packing, local customs, safety)

8. **Contingency Plans:**
   - Alternatives for bad weather, cancellations, or other issues

Format the itinerary using Markdown with clear headers (# for main sections, ## for days) and consistent structure for each day.
"""
|
102 |
+
|
103 |
+
def create_chat_prompt(plan: str, destination: str, num_days: int, history: List[Dict], user_input: str) -> str:
    """
    Build the chat prompt for a follow-up question about an existing plan.

    Args:
        plan: The previously generated itinerary text.
        destination: Destination the plan covers.
        num_days: Length of the trip in days.
        history: Prior turns as dicts with 'role' and 'content' keys.
        user_input: The user's newest message.

    Returns:
        The complete prompt string, ending with "Assistant:" so the model
        continues as the assistant.
    """
    # Render the prior exchange as "Role: message" lines.
    conversation = "".join(
        f"{msg['role'].capitalize()}: {msg['content']}\n" for msg in history
    )

    return f"""You are a helpful travel assistant. The user has a {num_days}-day travel plan for {destination}.
The plan is:
{plan}

Conversation history:
{conversation}
User: {user_input}

Assistant:"""
|
image_searcher.py
ADDED
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# image_searcher.py
|
2 |
+
import requests
|
3 |
+
import random
|
4 |
+
from typing import List, Dict, Optional
|
5 |
+
|
6 |
+
class ImageSearcher:
    """
    Fetches destination images: Pexels first, Lorem Picsum placeholders
    as a fallback when Pexels returns too few results.
    """

    def __init__(self, pexels_api_key: str):
        # Pexels authenticates with the raw key in the Authorization header.
        self.pexels_api_key = pexels_api_key
        self.headers = {
            'Authorization': self.pexels_api_key,
            # Browser-like UA; presumably to avoid request filtering — TODO confirm it is needed.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

    def search_pexels(self, query: str, num_images: int = 10) -> List[Dict]:
        """
        Search the Pexels photo API for `query`.

        Returns a list of image dicts with keys url, thumbnail, title,
        author, source; returns [] on any network/API failure (best-effort
        by design — callers fall back to placeholders).
        """
        try:
            url = "https://api.pexels.com/v1/search"
            params = {
                'query': query,
                'per_page': min(num_images, 80),  # Pexels caps per_page at 80
                'page': 1
            }
            response = requests.get(url, params=params, headers=self.headers, timeout=10)
            response.raise_for_status()
            data = response.json()
            images = []
            for item in data.get('photos', []):
                images.append({
                    'url': item['src']['medium'],
                    'thumbnail': item['src']['small'],
                    'title': item.get('alt', query),
                    'author': item['photographer'],
                    'source': 'pexels'
                })
            print(f"Found {len(images)} images from Pexels for query: {query}")
            return images
        except Exception as e:
            # Deliberate swallow: image search must never break plan generation.
            print(f"Pexels search failed for query {query}: {e}")
            return []

    def search_lorem_picsum(self, query: str, width: int = 400, height: int = 300, num_images: int = 10) -> List[Dict]:
        """
        Generate `num_images` random placeholder entries from picsum.photos
        (no API key required). The query only affects the titles.
        """
        images = []
        title_base = f"Pakistan {query}" if query else "Pakistan Travel"
        for i in range(num_images):
            # Random seed yields a different picture per entry.
            seed = random.randint(1, 1000)
            images.append({
                'url': f"https://picsum.photos/{width}/{height}?random={seed}",
                'thumbnail': f"https://picsum.photos/200/150?random={seed}",
                'title': f"{title_base} View {i+1}",
                'author': 'Lorem Picsum',
                'source': 'lorem_picsum'
            })
        print(f"Generated {len(images)} placeholder images for query: {query}")
        return images

    def search_all_sources(self, queries: List[str], num_images: int = 6) -> List[Dict]:
        """
        Collect up to `num_images` images: each query is tried against
        Pexels with a " Pakistan" suffix, then bare, then the shortfall is
        padded with placeholders so callers always get images.
        """
        all_images = []
        # Pass 1: each query suffixed with " Pakistan" for local relevance.
        for query in queries:
            if query:
                full_query = f"{query} Pakistan"
                pexels_images = self.search_pexels(full_query, num_images)
                all_images.extend(pexels_images)
                if len(all_images) >= num_images: break

        # Pass 2: retry bare queries. NOTE(review): this title-based
        # duplicate check looks unreliable — Pexels 'alt' titles rarely
        # contain the query text, so the bare query is almost always retried.
        if len(all_images) < num_images:
            for query in queries:
                if query and not any(f"{query} Pakistan" in q for q in [img['title'] for img in all_images]):
                    pexels_images = self.search_pexels(query, num_images - len(all_images))
                    all_images.extend(pexels_images)
                    if len(all_images) >= num_images: break

        # Pass 3: pad with placeholders for any remaining shortfall.
        if len(all_images) < num_images:
            print(f"Insufficient results, generating placeholders.")
            placeholder_images = self.search_lorem_picsum(queries[0] if queries else '', num_images=num_images - len(all_images))
            all_images.extend(placeholder_images)

        return all_images[:num_images]
|
77 |
+
|
78 |
+
def create_image_queries(travel_data: Dict, destination: Optional[str]) -> List[str]:
    """
    Build an ordered list of image-search queries, most specific first.

    Combines the destination (when known), location type, province, and
    mapped preference keywords; de-duplicates while preserving order and
    falls back to a generic "Pakistan travel" query when nothing applies.
    """
    preference_map = {
        "Cultural Experience": "culture", "Food and Dining": "food",
        "Shopping": "market", "Nature": "nature", "History": "historical"
    }

    location_type = travel_data.get("location_type", "").lower() if travel_data.get("location_type") != "Other" else ""
    raw_province = travel_data.get("province")
    province = raw_province.lower().replace("-", " ") if raw_province else ""
    pref_terms = [preference_map[p] for p in travel_data.get("preferences", []) if p in preference_map]

    # "Suggested Destination" is the sentinel used when Gemini picked the
    # destination itself — treat it as unknown.
    known_dest = destination if destination and destination != "Suggested Destination" else ""
    joined_prefs = " ".join(pref_terms)

    candidates = []
    if known_dest:
        candidates.append(known_dest)
        if location_type:
            candidates.append(f"{known_dest} {location_type}")
        if pref_terms:
            candidates.append(f"{known_dest} {joined_prefs}")
        if location_type and pref_terms:
            candidates.append(f"{known_dest} {location_type} {joined_prefs}")
    if province and location_type:
        candidates.append(f"{province} {location_type}")
    if province and pref_terms:
        candidates.append(f"{province} {joined_prefs}")
    if province:
        candidates.append(province)
    if location_type and pref_terms:
        candidates.append(f"{location_type} {joined_prefs}")
    if location_type:
        candidates.append(location_type)

    # De-duplicate while preserving order; drop empties.
    queries = list(dict.fromkeys(q for q in candidates if q))
    if not queries:
        queries.append("Pakistan travel")
    print(f"Created image search queries: {queries}")
    return queries
|
103 |
+
|
104 |
+
def extract_destination(plan_text: str, travel_data: Dict) -> Optional[str]:
    """
    Pull the destination out of a generated plan, falling back to the
    original request data.

    Priority: the "- Destination: X" bullet in the plan's overview, then
    the requested destination, then the first three words of any
    additional preferences, then the province; None if nothing matches.
    """
    # Preferred source: the overview bullet emitted by the plan template.
    for raw_line in plan_text.split('\n'):
        if not raw_line.strip().startswith('- Destination:'):
            continue
        dest = raw_line.partition(':')[2].strip()
        print(f"Extracted destination from plan: {dest}")
        return dest

    # Fallbacks drawn from the user's request, most specific first.
    if travel_data.get("destination"):
        return travel_data["destination"]
    extra = travel_data.get("additional_preferences")
    if extra:
        return " ".join(extra.split()[:3])
    if travel_data.get("province"):
        return travel_data["province"]
    return None
|
main.py
ADDED
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# # main.py
|
2 |
+
# import os
|
3 |
+
# from fastapi import FastAPI, HTTPException
|
4 |
+
# from fastapi.middleware.cors import CORSMiddleware
|
5 |
+
# from dotenv import load_dotenv
|
6 |
+
#
|
7 |
+
# from models import TravelDataInput, PlanResponse, ChatInput, ChatResponse
|
8 |
+
# from image_searcher import ImageSearcher, create_image_queries, extract_destination
|
9 |
+
# from gemini_utils import create_travel_plan_prompt, generate_with_gemini, create_chat_prompt
|
10 |
+
# from rag_utils import get_relevant_plan
|
11 |
+
#
|
12 |
+
# # Load environment variables (especially API keys)
|
13 |
+
# load_dotenv()
|
14 |
+
# PEXELS_API_KEY = os.getenv("PEXELS_API_KEY", "LB4mRviLcvE72R2645m3f4NpYGBMpPucVctpvzJAsCJYC2dhbGVpLraK") # Replace or set in env
|
15 |
+
# GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "AIzaSyDuDPP0eWWA0UsGBBIeOI7Fl0WcPli4Sdo") # Replace or set in env
|
16 |
+
#
|
17 |
+
# if not PEXELS_API_KEY or not GOOGLE_API_KEY:
|
18 |
+
# print("Warning: API keys not found. Please set PEXELS_API_KEY and GOOGLE_API_KEY environment variables.")
|
19 |
+
#
|
20 |
+
# # Initialize app and image searcher
|
21 |
+
# app = FastAPI(title="Travel Recommendation API")
|
22 |
+
# image_searcher = ImageSearcher(PEXELS_API_KEY)
|
23 |
+
#
|
24 |
+
# # CORS (Cross-Origin Resource Sharing) - Allow frontend access
|
25 |
+
# app.add_middleware(
|
26 |
+
# CORSMiddleware,
|
27 |
+
# allow_origins=["*"], # Adjust in production
|
28 |
+
# allow_credentials=True,
|
29 |
+
# allow_methods=["*"],
|
30 |
+
# allow_headers=["*"],
|
31 |
+
# )
|
32 |
+
#
|
33 |
+
# @app.get("/", summary="Root endpoint for health check")
|
34 |
+
# def read_root():
|
35 |
+
# return {"message": "Travel Recommendation API is running!"}
|
36 |
+
#
|
37 |
+
# @app.post("/generate_plan", response_model=PlanResponse, summary="Generate a travel plan")
|
38 |
+
# async def generate_plan_endpoint(travel_input: TravelDataInput):
|
39 |
+
# travel_data = travel_input.model_dump() # Use model_dump() for Pydantic v2
|
40 |
+
# source = "generated"
|
41 |
+
# plan_text = None
|
42 |
+
#
|
43 |
+
# # 1. Try RAG first
|
44 |
+
# try:
|
45 |
+
# relevant_plan = get_relevant_plan(travel_data)
|
46 |
+
# if relevant_plan:
|
47 |
+
# plan_text = relevant_plan
|
48 |
+
# source = "index"
|
49 |
+
# except Exception as e:
|
50 |
+
# print(f"RAG check failed (continuing with generation): {e}")
|
51 |
+
#
|
52 |
+
# # 2. If no RAG plan, generate with Gemini
|
53 |
+
# if not plan_text:
|
54 |
+
# try:
|
55 |
+
# prompt = create_travel_plan_prompt(travel_data)
|
56 |
+
# plan_text = generate_with_gemini(prompt)
|
57 |
+
# source = "generated"
|
58 |
+
# except Exception as e:
|
59 |
+
# print(f"Gemini generation failed: {e}")
|
60 |
+
# raise HTTPException(status_code=500, detail=f"Failed to generate plan: {e}")
|
61 |
+
#
|
62 |
+
# if not plan_text:
|
63 |
+
# raise HTTPException(status_code=500, detail="Could not retrieve or generate a travel plan.")
|
64 |
+
#
|
65 |
+
# # 3. Extract destination & get images
|
66 |
+
# try:
|
67 |
+
# destination = extract_destination(plan_text, travel_data) or "Suggested Destination"
|
68 |
+
# image_queries = create_image_queries(travel_data, destination)
|
69 |
+
# images = image_searcher.search_all_sources(image_queries, num_images=6)
|
70 |
+
# except Exception as e:
|
71 |
+
# print(f"Image search failed: {e}")
|
72 |
+
# images = [] # Return empty list if image search fails
|
73 |
+
# image_queries = []
|
74 |
+
#
|
75 |
+
# return PlanResponse(
|
76 |
+
# plan_text=plan_text,
|
77 |
+
# destination_used=destination,
|
78 |
+
# images=images,
|
79 |
+
# image_queries_used=image_queries,
|
80 |
+
# source=source
|
81 |
+
# )
|
82 |
+
#
|
83 |
+
# @app.post("/chat", response_model=ChatResponse, summary="Chat about the travel plan")
|
84 |
+
# async def chat_endpoint(chat_input: ChatInput):
|
85 |
+
# try:
|
86 |
+
# prompt = create_chat_prompt(
|
87 |
+
# chat_input.plan_text,
|
88 |
+
# chat_input.destination,
|
89 |
+
# chat_input.num_days,
|
90 |
+
# [msg.model_dump() for msg in chat_input.chat_history], # Convert Pydantic to dict
|
91 |
+
# chat_input.user_input
|
92 |
+
# )
|
93 |
+
# response = generate_with_gemini(prompt, max_tokens=1024, temperature=0.5)
|
94 |
+
# return ChatResponse(assistant_response=response)
|
95 |
+
# except Exception as e:
|
96 |
+
# print(f"Chat generation failed: {e}")
|
97 |
+
# raise HTTPException(status_code=500, detail=f"Failed to get chat response: {e}")
|
98 |
+
#
|
99 |
+
# # To run locally: uvicorn main:app --reload
|
100 |
+
|
101 |
+
# PEXELS_API_KEY = "LB4mRviLcvE72R2645m3f4NpYGBMpPucVctpvzJAsCJYC2dhbGVpLraK"
|
102 |
+
# GOOGLE_API_KEY = "AIzaSyDuDPP0eWWA0UsGBBIeOI7Fl0WcPli4Sdo"
|
103 |
+
|
104 |
+
import re
import os
import json
from typing import List
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from models import TravelDataInput, PlanResponse
from image_searcher import ImageSearcher, create_image_queries, extract_destination
from gemini_utils import create_travel_plan_prompt, generate_with_gemini
from rag_utils import get_relevant_plan

# API keys — SECURITY FIX: these were previously hard-coded in source
# control (and are therefore compromised; rotate them). Read from the
# environment / Hugging Face Spaces secrets instead.
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY", "")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY", "")
if not PEXELS_API_KEY or not GOOGLE_API_KEY:
    print("Warning: PEXELS_API_KEY / GOOGLE_API_KEY not set; external APIs will fail.")

# App setup
app = FastAPI(title="Travel Recommendation API")
image_searcher = ImageSearcher(PEXELS_API_KEY)

# Allow any origin so a separately hosted frontend can call this API.
# NOTE(review): tighten allow_origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
132 |
+
|
133 |
+
@app.get("/", summary="Root endpoint for health check")
def read_root():
    """Health check: confirms the API process is up and responding."""
    status_message = "Travel Recommendation API is running!"
    return {"message": status_message}
|
136 |
+
|
137 |
+
@app.post("/generate_plan", response_model=PlanResponse, summary="Generate a travel plan")
async def generate_plan_endpoint(travel_input: TravelDataInput):
    """
    Generate a travel plan: try the RAG index first, fall back to Gemini,
    then attach destination images (best-effort).

    Raises:
        HTTPException(500): when neither the index nor Gemini yields a plan.
    """
    travel_data = travel_input.model_dump()
    source = "generated"
    plan_text = None

    # Step 1: RAG — reuse an indexed plan when one matches.
    try:
        relevant_plan = get_relevant_plan(travel_data)
        if relevant_plan:
            plan_text = relevant_plan
            source = "index"
    except Exception as e:
        # Best-effort: a RAG failure falls through to generation.
        print(f"RAG check failed: {e}")

    # Step 2: Generate with Gemini if RAG produced nothing.
    if not plan_text:
        try:
            prompt = create_travel_plan_prompt(travel_data)
            plan_text = generate_with_gemini(prompt)
        except Exception as e:
            print(f"Gemini generation failed: {e}")
            raise HTTPException(status_code=500, detail=f"Failed to generate plan: {e}")

    if not plan_text:
        raise HTTPException(status_code=500, detail="Could not retrieve or generate a travel plan.")

    # Step 3: Extract the destination and fetch matching images.
    # BUG FIX: `destination` was previously unbound when this try-block
    # failed before its first assignment, causing a NameError in the
    # response below — default all three values first.
    destination = "Suggested Destination"
    image_queries: List[str] = []
    images: list = []
    try:
        destination = extract_destination(plan_text, travel_data) or "Suggested Destination"
        image_queries = create_image_queries(travel_data, destination)
        images = image_searcher.search_all_sources(image_queries, num_images=6)
    except Exception as e:
        # Image failures must not block returning the plan itself.
        print(f"Image search failed: {e}")
        images = []
        image_queries = []

    return PlanResponse(
        plan_text=plan_text,
        destination_used=destination,
        images=images,
        image_queries_used=image_queries,
        source=source,
    )
|
181 |
+
|
182 |
+
# ========== EXTRACTION ENDPOINT ==========
|
183 |
+
|
184 |
+
class ExtractionInput(BaseModel):
    """Request body for /extract_package."""

    # Raw Markdown travel-plan text (as produced by /generate_plan).
    plan_text: str
|
186 |
+
|
187 |
+
@app.post("/extract_package", summary="Extract structured package data from plan text")
async def extract_package_endpoint(data: ExtractionInput):
    """
    Ask Gemini to distill a free-text travel plan into a fixed JSON
    package schema and return the parsed dict.

    Raises:
        HTTPException(500): if Gemini fails or returns no parseable JSON.
    """
    # Doubled braces ({{ }}) render as literal braces inside this f-string.
    extraction_prompt = f"""
Extract ONLY the following fields from the travel plan below and return STRICTLY in raw JSON format. No extra explanation, no markdown, no text.

Required JSON format:
{{
"packageName": string,
"packageDescription": string,
"packageDestination": string,
"packageDays": integer,
"packageNights": integer,
"packageAccommodation": string,
"packageTransportation": string,
"packageMeals": string,
"packageActivities": string,
"packagePrice": float,
"packageDiscountPrice": float,
"packageOffer": boolean,
"packageRating": 0,
"packageTotalRatings": 0
}}

TRAVEL PLAN:
\"\"\"
{data.plan_text}
\"\"\"
"""

    try:
        raw_response = generate_with_gemini(extraction_prompt)
        print("Gemini raw response:", raw_response)

        # Extract the JSON body from the response; the greedy DOTALL match
        # tolerates the model wrapping the JSON in extra text or fences.
        json_match = re.search(r'\{.*\}', raw_response, re.DOTALL)
        if not json_match:
            raise ValueError("No JSON object found in Gemini response")

        json_str = json_match.group(0)
        structured_package = json.loads(json_str)

    except Exception as e:
        # Log the prompt too — the usual failure is the model drifting from
        # the requested raw-JSON format.
        print("Gemini extraction failed:", e)
        print("Prompt was:\n", extraction_prompt)
        raise HTTPException(status_code=500, detail="Failed to extract structured package data.")

    return structured_package
|
models.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# models.py
|
2 |
+
from pydantic import BaseModel, Field
|
3 |
+
from typing import List, Dict, Optional
|
4 |
+
|
5 |
+
class TravelDataInput(BaseModel):
    """User-supplied trip constraints used to build a travel plan."""

    destination: Optional[str] = Field(None, description="e.g., Neelum Valley")
    location_type: str = Field(..., description="City, Beach, Mountain, etc.")
    province: str = Field(..., description="Punjab, Sindh, etc.")
    starting_location: str = Field(..., description="e.g., Islamabad")
    gender: str = Field(..., description="Male, Female, Mixed")
    num_people: int = Field(..., gt=0, description="Number of people")
    has_children: bool = Field(False, description="Whether children are present")
    activity_type: str = Field(..., description="Adventure, Sightseeing, Both")
    travel_group: str = Field(..., description="Solo, With partner, etc.")
    # default_factory is the documented pydantic form for a mutable default;
    # it avoids the mutable-literal-default pattern while keeping the same
    # behavior (an empty list when the field is omitted).
    preferences: List[str] = Field(default_factory=list, description="Cultural Experience, Food, etc.")
    budget: int = Field(0, ge=0, description="Budget in PKR")
    num_days: int = Field(..., gt=0, description="Number of days")
    additional_preferences: Optional[str] = Field(None, description="Any other requests")
|
19 |
+
|
20 |
+
class PlanResponse(BaseModel):
    """Structured result returned after generating a travel plan."""

    # Full generated plan text.
    plan_text: str
    # Destination the plan was built for — presumably the resolved/normalized
    # destination rather than the raw request value; confirm against the
    # generation endpoint.
    destination_used: str
    # Image search results for the plan.
    images: List[Dict]
    # The search queries that produced `images`.
    image_queries_used: List[str]
    # Provenance label for the plan (set by the generating endpoint).
    source: str
|
26 |
+
|
27 |
+
class ChatMessage(BaseModel):
    """A single turn in a chat conversation."""

    role: str  # "user" or "assistant"
    content: str  # the message text for this turn
|
30 |
+
|
31 |
+
class ChatInput(BaseModel):
    """Request payload for the chat flow over an existing travel plan."""

    # The current full plan text the conversation is about.
    plan_text: str
    # Trip context passed alongside the plan.
    destination: str
    num_days: int
    # Prior turns of the conversation (see ChatMessage).
    chat_history: List[ChatMessage]
    # The user's newest message.
    user_input: str
|
37 |
+
|
38 |
+
class ChatResponse(BaseModel):
    """Assistant reply returned by the chat endpoint."""

    assistant_response: str
|
rag_utils.py
ADDED
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# rag_utils.py
|
2 |
+
import faiss
|
3 |
+
import pickle
|
4 |
+
import numpy as np
|
5 |
+
from typing import Dict, Optional
|
6 |
+
|
7 |
+
# On-disk artifacts for retrieval: the FAISS vector index and the pickle
# holding the plan texts it maps to. Both are loaded relative to the
# working directory.
FAISS_INDEX_PATH = "faiss_travel_index"
PKL_PATH = "faiss_travel_index.pkl"
|
9 |
+
|
10 |
+
def get_relevant_plan(travel_data: Dict) -> Optional[str]:
    """Return the single most similar stored travel plan, or None.

    Loads the FAISS index and its pickled plan texts, embeds the query
    (currently a DUMMY random vector — see the IMPORTANT note below) and
    performs a k=1 nearest-neighbour search.

    Args:
        travel_data: dict with optional 'destination', 'location_type',
            'province' and 'preferences' keys used to build the query text.

    Returns:
        The matched plan text, or None when the index/pickle files are
        missing, the search finds nothing, or any error occurs
        (best-effort by design: failures are printed, never raised).
    """
    try:
        index = faiss.read_index(FAISS_INDEX_PATH)
        # NOTE(review): pickle.load is only safe on artifacts we produced
        # ourselves — never point PKL_PATH at untrusted data.
        with open(PKL_PATH, "rb") as f:
            plan_data = pickle.load(f)

        if not isinstance(plan_data, dict):
            print("Warning: plan_data is not a dict, assuming list with index as key")
            plan_data = {i: plan for i, plan in enumerate(plan_data)}

        # --- IMPORTANT ---
        # This needs a real embedding model (e.g. sentence-transformers
        # 'all-MiniLM-L6-v2'). The random vector below WON'T give relevant
        # results; it only keeps the pipeline runnable until embeddings are
        # wired in (query_text is already prepared for that).
        query_text = f"{travel_data.get('destination', '')} {travel_data.get('location_type', '')} {travel_data.get('province', '')} {' '.join(travel_data.get('preferences', []))}"
        # query_vector = model.encode([query_text]).astype('float32')  # with SentenceTransformers
        query_vector = np.random.rand(1, index.d).astype('float32')  # Dummy vector - REPLACE THIS
        # --- END IMPORTANT ---

        distances, indices = index.search(query_vector, 1)

        # FAISS reports "no neighbour found" as index -1; the original
        # `indices.size > 0` check alone is always true for k=1.
        if indices.size > 0 and indices[0][0] != -1:
            relevant_index = int(indices[0][0])  # numpy int64 -> plain int
            if relevant_index in plan_data:
                print(f"Found relevant plan at index {relevant_index} with distance {distances[0][0]}")
                return plan_data[relevant_index]
            # Keys may be non-positional (e.g. string ids): fall back to
            # mapping the FAISS position onto the dict's key order.
            # (plan_data is guaranteed to be a dict at this point.)
            if relevant_index < len(plan_data):
                mapped_key = list(plan_data.keys())[relevant_index]
                print(f"Found relevant plan at mapped index {mapped_key} with distance {distances[0][0]}")
                return plan_data[mapped_key]
            print(f"No plan found at index {relevant_index}")

    except FileNotFoundError:
        print(f"Error: FAISS index or pickle file not found at {FAISS_INDEX_PATH} or {PKL_PATH}")
        return None
    except Exception as e:
        print(f"Error in RAG functionality: {e}")
        return None

    print("No relevant plan found.")
    return None
|
requirements.txt
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
fastapi
|
2 |
+
uvicorn[standard]
|
3 |
+
pydantic
|
4 |
+
requests
|
5 |
+
google-generativeai
|
6 |
+
backoff
|
7 |
+
python-dotenv
|
8 |
+
faiss-cpu # Or faiss-gpu if you have a GPU and CUDA set up
|
9 |
+
numpy
|
10 |
+
# (removed duplicate entry: google-generativeai is already listed above)
|
11 |
+
# Add sentence-transformers if you implement real RAG embeddings
|
12 |
+
# sentence-transformers
|