Upload 21 files
- .gitattributes +7 -0
- .gitignore +11 -0
- Dockerfile +29 -0
- app.py +605 -0
- modules/__init__.py +1 -0
- modules/audio.py +165 -0
- modules/chatbot_processor.py +247 -0
- modules/config.py +87 -0
- modules/input_tracker.py +133 -0
- modules/location.py +76 -0
- modules/location_processor.py +346 -0
- modules/models.py +703 -0
- modules/nlp_processor.py +158 -0
- modules/parallel.py +126 -0
- modules/property_processor.py +220 -0
- modules/rag/feature_matcher.py +243 -0
- modules/response.py +249 -0
- modules/security.py +192 -0
- requirements.txt +43 -0
- templates/index.html +1824 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+models/llm/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+models/saved_models/feature_matcher/zero_shot_model filter=lfs diff=lfs merge=lfs -text
+models/llm/conversations.json filter=lfs diff=lfs merge=lfs -text
+models/saved_models/feature_matcher/base_model/model.safetensors filter=lfs diff=lfs merge=lfs -text
+models/saved_models/model_state_dict.pth filter=lfs diff=lfs merge=lfs -text
+models/saved_models/property_index.faiss filter=lfs diff=lfs merge=lfs -text
+models/saved_models/search_system.pkl filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,11 @@
# Ignore Python cache files
models/
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
.env
*.log
Dockerfile
ADDED
@@ -0,0 +1,29 @@
FROM python:3.9

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    libsndfile1 \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first to leverage Docker cache
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application
COPY . .

# Create necessary directories
RUN mkdir -p models/saved_models models/llm temp

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV PORT=7860

# Expose the port
EXPOSE 7860

# Run the application
CMD ["python", "app.py"]
app.py
ADDED
@@ -0,0 +1,605 @@
import os
import logging
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
import threading
from functools import wraps
import sys
import time
from geopy.distance import geodesic
import torch

# Add the modules directory to Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import modules
from modules.config import *
from modules.models import *
from modules.security import *
from modules.audio import *
from modules.location_processor import LocationProcessor, set_location
from modules.response import *
from modules.input_tracker import *
from modules.chatbot_processor import ChatbotProcessor

# Import specific functions
from modules.security import with_user_plan
from modules.audio import process_audio_file

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(os.path.join(BASE_DIR, 'app.log')),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Initialize Flask app with correct template folder path
app = Flask(__name__,
            template_folder=os.path.join(BASE_DIR, 'templates'),
            static_folder=os.path.join(BASE_DIR, 'static'))
conversation_context = {}

# Configure CORS
CORS(app, resources={
    r"/*": {
        "origins": ["*"],  # Allow all origins for Hugging Face Spaces
        "methods": ["GET", "POST", "OPTIONS"],
        "allow_headers": ["Content-Type", "X-Session-ID"]
    }
})

# Initialize rate limiter
limiter = Limiter(
    app=app,
    key_func=get_remote_address,
    default_limits=[f"{MAX_REQUESTS_PER_WINDOW} per minute", "1000 per hour"]
)

# Initialize components in the correct order
print("Loading sentence transformer...")
model_embedding = load_sentence_transformer()

print("Fetching and caching properties...")
properties = fetch_and_cache_properties()
if not properties:
    logger.error("Failed to fetch properties. Please check API connection.")
    sys.exit(1)

print("Loading FAISS index...")
index = load_faiss_index()

print("Loading PCA model...")
pca = load_pca_model()

print("Initializing retriever...")
retriever = CustomRagRetriever(index, model_embedding, pca)

print("Loading tokenizer and LLM model...")
tokenizer, model_llm = load_tokenizer_and_model()

print("Initializing security components...")
security_manager = SecurityManager()
query_validator = QueryValidator(model_embedding)

print("Initializing input tracker...")
input_tracker = UserInputTracker()

# Initialize processors
chatbot_processor = ChatbotProcessor()

def security_check(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        try:
            ip_address = request.remote_addr

            if not security_manager.check_rate_limit(ip_address):
                return jsonify({"error": "Rate limit exceeded"}), 429

            if request.method == 'POST':
                if not request.is_json:
                    return jsonify({"error": "Content-Type must be application/json"}), 415

            return f(*args, **kwargs)
        except Exception as e:
            logging.error(f"Security check failed: {str(e)}")
            return jsonify({"error": "Security check failed"}), 400
    return decorated_function

@app.before_request
def handle_preflight():
    if request.method == 'OPTIONS':
        response = app.make_default_options_response()
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type, X-Session-ID')
        response.headers.add('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        return response

@app.route('/')
def index():
    print("Rendering index page")
    return render_template('index.html')

@app.route('/search', methods=['POST'])
@security_check
@limiter.limit("30 per minute")
@with_user_plan
def search():
    try:
        data = request.json
        query = data.get('query')

        if not query:
            return jsonify({"error": "Query parameter is missing"}), 400

        cleaned_query = query_validator.clean_input(query)
        if not query_validator.validate_query_length(cleaned_query):
            return jsonify({"error": "Query too long"}), 400

        session_id = data.get('session_id')
        continue_conversation = data.get('continue', False)

        if session_id not in conversation_context or not continue_conversation:
            search_results = retriever.retrieve(cleaned_query)
            formatted_results = []

            for result in search_results:
                property_info = result['property']

                # Get property images from the property info
                property_images = property_info.get('propertyImages', [])
                if isinstance(property_images, str):
                    if ',' in property_images:
                        property_images = [img.strip() for img in property_images.split(',')]
                    else:
                        property_images = [property_images]
                elif property_images is None:
                    property_images = []

                property_info = convert_numeric_fields_to_int(property_info)

                formatted_result = {
                    "PropertyName": property_info.get('PropertyName', 'N/A'),
                    "Address": property_info.get('Address', 'N/A'),
                    "ZipCode": property_info.get('ZipCode', 0),
                    "LeasableSquareFeet": property_info.get('LeasableSquareFeet', 0),
                    "YearBuilt": property_info.get('YearBuilt', 0),
                    "NumberOfRooms": property_info.get('NumberOfRooms', 0),
                    "ParkingSpaces": property_info.get('ParkingSpaces', 0),
                    "PropertyManager": property_info.get('PropertyManager', 'N/A'),
                    "MarketValue": float(property_info.get('MarketValue', 0)),
                    "TaxAssessmentNumber": property_info.get('TaxAssessmentNumber', 'N/A'),
                    "Latitude": float(property_info.get('Latitude', 0)),
                    "Longitude": float(property_info.get('Longitude', 0)),
                    "CreateDate": property_info.get('CreateDate', 'N/A'),
                    "LastModifiedDate": property_info.get('LastModifiedDate', 'N/A'),
                    "City": property_info.get('City', 'N/A'),
                    "State": property_info.get('State', 'N/A'),
                    "Country": property_info.get('Country', 'N/A'),
                    "PropertyType": property_info.get('PropertyType', 'N/A'),
                    "PropertyStatus": property_info.get('PropertyStatus', 'N/A'),
                    "Description": property_info.get('Description', 'N/A'),
                    "ViewNumber": property_info.get('ViewNumber', 0),
                    "Contact": property_info.get('Contact', 0),
                    "TotalSquareFeet": property_info.get('TotalSquareFeet', 0),
                    "IsDeleted": bool(property_info.get('IsDeleted', False)),
                    "Beds": property_info.get('Beds', 0),
                    "Baths": property_info.get('Baths', 0),
                    "AgentName": property_info.get('AgentName', 'N/A'),
                    "AgentPhoneNumber": property_info.get('AgentPhoneNumber', 'N/A'),
                    "AgentEmail": property_info.get('AgentEmail', 'N/A'),
                    "KeyFeatures": property_info.get('KeyFeatures', 'N/A'),
                    "NearbyAmenities": property_info.get('NearbyAmenities', 'N/A'),
                    "propertyImages": property_images,
                    "Distance": result['distance']
                }
                formatted_results.append(formatted_result)

            conversation_context[session_id] = formatted_results
        else:
            formatted_results = conversation_context[session_id]

        print(f"Returning {len(formatted_results)} search results")
        if formatted_results:
            print(f"Sample property images array: {formatted_results[0]['propertyImages']}")

        return jsonify(formatted_results)

    except Exception as e:
        logging.error(f"Error in search endpoint: {str(e)}")
        return jsonify({"error": "An error occurred processing your request"}), 500

@app.route('/transcribe', methods=['POST'])
@security_check
def transcribe():
    try:
        if 'audio' not in request.files:
            return jsonify({"error": "No audio file provided"}), 400

        audio_file = request.files['audio']

        # Validate file size (max 10MB)
        if audio_file.content_length and audio_file.content_length > 10 * 1024 * 1024:
            return jsonify({"error": "Audio file too large. Maximum size is 10MB"}), 400

        # Validate file type
        allowed_extensions = {'wav', 'mp3', 'ogg', 'webm'}
        if '.' not in audio_file.filename or \
           audio_file.filename.rsplit('.', 1)[1].lower() not in allowed_extensions:
            return jsonify({"error": "Invalid audio file format. Supported formats: WAV, MP3, OGG, WEBM"}), 400

        result = process_audio_file(audio_file)

        if isinstance(result, tuple) and len(result) == 2:
            response, status_code = result
            return jsonify(response), status_code

        return jsonify(result)

    except Exception as e:
        logger.error(f"Error in transcribe endpoint: {str(e)}")
        return jsonify({"error": "An error occurred processing your audio file"}), 500

@app.route('/generate', methods=['POST'])
@security_check
@limiter.limit("30 per minute")
@with_user_plan
def generate():
    data = request.json
    query = data.get('query')
    session_id = data.get('session_id')
    continue_conversation = data.get('continue', False)
    current_plan = get_current_plan()

    if not query:
        return jsonify({"error": "Query parameter is missing"}), 400
    if session_id in conversation_context and continue_conversation:
        previous_results = conversation_context[session_id]
        combined_query = f"Based on previous results:{previous_results}New Query: {query}"
        response, duration = generate_response(combined_query, tokenizer, model_llm)
    else:
        response, duration = generate_response(query, tokenizer, model_llm)
    conversation_context[session_id] = response
    print(f"Generated response: {response}")
    print(f"Time taken to generate response: {duration:.2f} seconds\n")
    return jsonify({"response": response, "duration": duration})

@app.route('/set-location', methods=['POST'])
@security_check
def handle_set_location():
    """Handle location setting and nearby property search"""
    try:
        # Get request data
        data = request.get_json()
        print(f"Received data: {data}")

        # Extract values
        latitude = float(data.get('latitude', 0))
        longitude = float(data.get('longitude', 0))
        session_id = data.get('session_id', '')

        print(f"Extracted values - latitude: {latitude}, longitude: {longitude}, session_id: {session_id}")

        # Validate coordinates
        if latitude == 0 or longitude == 0:
            return jsonify({
                "status": "error",
                "message": "Invalid coordinates"
            }), 400

        # Initialize location processor
        location_processor = LocationProcessor()

        # Set location and find nearby properties
        result = location_processor.set_location(latitude, longitude, session_id)

        return jsonify(result)

    except Exception as e:
        logger.error(f"Error in set_location: {str(e)}")
        return jsonify({
            "status": "error",
            "message": "Error processing location"
        }), 500

@app.route('/check-input-limit', methods=['GET'])
@security_check
def check_input_limit():
    try:
        session_id = request.args.get('session_id')
        if not session_id:
            return jsonify({"error": "session_id is required"}), 400

        current_plan = get_current_plan()
        remaining_inputs = input_tracker.get_remaining_inputs(session_id, current_plan)
        usage_stats = input_tracker.get_usage_stats(session_id)

        return jsonify({
            "plan": current_plan.value,
            "remaining_inputs": remaining_inputs,
            "total_limit": PLAN_INPUT_LIMITS[current_plan],
            "usage_stats": usage_stats
        })

    except Exception as e:
        logging.error(f"Error checking input limit: {str(e)}")
        return jsonify({"error": "Error checking input limit"}), 500

@app.route('/recommend', methods=['POST'])
@security_check
@limiter.limit("30 per minute")
@with_user_plan
def recommend():
    try:
        data = request.json
        query = data.get('query')
        session_id = data.get('session_id')
        continue_conversation = data.get('continue', False)
        current_plan = get_current_plan()

        if not query:
            return jsonify({"error": "Query parameter is missing"}), 400

        # Clean and validate input
        cleaned_query = query_validator.clean_input(query)
        if not query_validator.validate_query_length(cleaned_query):
            return jsonify({"error": "Query too long"}), 400

        # Check if query is related to real estate
        if not query_validator.is_real_estate_query(cleaned_query):
            return jsonify({
                "response": "I'm a real estate chatbot. I can help you with property-related queries like finding apartments, PG accommodations, hostels, or commercial properties. Please ask me about properties!",
                "is_real_estate": False
            })

        # Special handling for "hi" query
        if cleaned_query.lower() == 'hi':
            return jsonify({
                "response": "Do you want to know the properties located near you? (yes/no):",
                "is_location_query": True
            })

        # Special handling for "yes" after "hi"
        if cleaned_query.lower() == 'yes':
            # Get location from the request
            latitude = data.get('latitude')
            longitude = data.get('longitude')

            if not latitude or not longitude:
                return jsonify({
                    "error": "Location not available. Please allow location access or set your location first.",
                    "needs_location": True
                }), 400

            # Initialize location processor
            location_processor = LocationProcessor()

            # Get nearby properties
            result = location_processor.set_location(latitude, longitude, session_id)

            if result["status"] == "success":
                # Format the response for frontend
                properties = result["properties"]
                response_text = "Here are the properties near your location:\n\n"

                for i, prop in enumerate(properties, 1):
                    response_text += (
                        f"{i}. {prop.get('PropertyName', 'Unnamed Property')}\n"
                        f"   Address: {prop.get('Address', 'No address available')}\n"
                        f"   Distance: {prop.get('Distance', 0)} km\n"
                        f"   Type: {prop.get('PropertyType', 'Not specified')}\n"
                        f"   Price: ${prop.get('MarketValue', 0):,.2f}\n\n"
                    )

                return jsonify({
                    # "response": response_text,
                    "properties": properties,
                    "location": result["location"],
                    "is_location_based": True,
                    "status": "success"
                })
            else:
                return jsonify({
                    "error": "No properties found near your location",
                    "status": "error"
                }), 404

        # Handle regular queries with RAG-based recommendation
        if session_id in conversation_context and continue_conversation:
            previous_results = conversation_context[session_id]
            combined_query = f"Based on previous results:{previous_results}New Query: {cleaned_query}"
            raw_results = retriever.retrieve(combined_query, top_k=5)
        else:
            raw_results = retriever.retrieve(cleaned_query, top_k=5)

        # Filter results based on user plan
        filtered_results = []
        for result in raw_results:
            property_dict = result['property'].to_dict() if hasattr(result['property'], 'to_dict') else result['property']
            property_dict = convert_numeric_fields_to_int(property_dict)
            filtered_property = filter_property_by_plan(property_dict, current_plan)

            if 'propertyImages' in filtered_property:
                del filtered_property['propertyImages']
            if 'property_image' in filtered_property:
                del filtered_property['property_image']
            if 'image_url' in filtered_property:
                del filtered_property['image_url']

            filtered_results.append({
                'property': filtered_property,
                'propertyImages': result.get('image_url', []) if current_plan == UserPlan.PRO else [],
                'distance': result.get('distance')
            })

        # Generate response
        response_text, has_restricted_request = format_llm_prompt(
            query=combined_query if continue_conversation else cleaned_query,
            filtered_results=filtered_results,
            user_plan=current_plan,
            original_query=cleaned_query
        )

        response, duration = generate_response(
            response_text,
            tokenizer=tokenizer,
            model_llm=model_llm,
            max_new_tokens=512,
            temperature=0.7,
            top_k=30,
            top_p=0.8,
            repetition_penalty=1.05
        )

        # Store the response in conversation context
        conversation_context[session_id] = response

        return jsonify({
            "response": response,
            "duration": duration,
            "plan_level": current_plan.value,
            "filtered_results": filtered_results,
            "input_limit_info": {
                "remaining_inputs": input_tracker.get_remaining_inputs(session_id, current_plan),
                "total_limit": PLAN_INPUT_LIMITS[current_plan],
                "usage_stats": input_tracker.get_usage_stats(session_id)
            }
        })

    except Exception as e:
        logging.error(f"Error in recommend endpoint: {str(e)}")
        return jsonify({"error": "An error occurred processing your request"}), 500

@app.route('/api/properties/search', methods=['POST'])
def search_properties():
    try:
        data = request.get_json()
        query = data.get('query', '')
        user_location = data.get('user_location')  # (latitude, longitude)

        # Get properties from database or external source
        properties = get_properties()  # Implement this function to get properties

        # Process query and get filtered properties
        results = chatbot_processor.process_query(
            query, properties, user_location
        )

        return jsonify({
            'status': 'success',
            'results': results
        })

    except Exception as e:
        logging.error(f"Error searching properties: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500

@app.route('/api/properties/similar', methods=['POST'])
def find_similar_properties():
    try:
        data = request.get_json()
        reference_property = data.get('property')
        top_k = data.get('top_k', 5)

        # Get properties from database or external source
        properties = get_properties()  # Implement this function to get properties

        # Find similar properties
        results = chatbot_processor.get_similar_properties(
            reference_property, properties, top_k
        )

        return jsonify({
            'status': 'success',
            'results': results
        })

    except Exception as e:
        logging.error(f"Error finding similar properties: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500

@app.route('/api/properties/landmarks', methods=['POST'])
def get_property_landmarks():
    try:
        data = request.get_json()
        property_data = data.get('property')
        radius_miles = data.get('radius_miles', 5.0)

        # Get nearby landmarks
        landmarks = chatbot_processor.get_nearby_landmarks(
            property_data, radius_miles
        )

        return jsonify({
            'status': 'success',
            'landmarks': landmarks
        })

    except Exception as e:
        logging.error(f"Error getting property landmarks: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500

@app.route('/api/properties/location', methods=['POST'])
def get_property_location():
    try:
        data = request.get_json()
        property_data = data.get('property')

        # Get location details
        location_details = chatbot_processor.get_location_details(property_data)

        return jsonify({
            'status': 'success',
            'location': location_details
        })

    except Exception as e:
        logging.error(f"Error getting property location: {str(e)}")
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500

@app.errorhandler(429)
def ratelimit_handler(e):
    return jsonify({"error": "Rate limit exceeded"}), 429

@app.errorhandler(400)
def bad_request_handler(e):
    return jsonify({"error": "Bad request"}), 400

@app.errorhandler(500)
def internal_error_handler(e):
    return jsonify({"error": "Internal server error"}), 500

# Add helper functions
def convert_numeric_fields_to_int(property_dict):
    """Convert numeric fields to integers in property dictionary"""
    numeric_fields = ['Bedrooms', 'Bathrooms', 'SquareFeet', 'YearBuilt', 'Price']
    for field in numeric_fields:
        if field in property_dict and property_dict[field] is not None:
            try:
                property_dict[field] = int(float(property_dict[field]))
            except (ValueError, TypeError):
                property_dict[field] = None
    return property_dict

if __name__ == '__main__':
    # Get port from environment variable or default to 7860 for Hugging Face Spaces
    port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=port)
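As a rough illustration of how the frontend in templates/index.html is expected to talk to these routes, a minimal client call to the /recommend endpoint could look like the sketch below. This is not part of the commit: it assumes the app is running locally on port 7860, that the requests package is installed, and that the session id is an arbitrary client-chosen value rather than something issued by the server.

# Hypothetical usage sketch (not part of the uploaded files): query the /recommend route.
import requests

payload = {
    "query": "2 BHK apartment with parking",  # free-text real-estate query
    "session_id": "demo-session-1",           # arbitrary client-side session id
    "continue": False                          # True to reuse this session's previous results
}
resp = requests.post("http://localhost:7860/recommend", json=payload)
print(resp.status_code)
print(resp.json().get("response"))             # generated answer plus plan/limit metadata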
modules/__init__.py
ADDED
@@ -0,0 +1 @@
# This file makes the modules directory a Python package
modules/audio.py
ADDED
@@ -0,0 +1,165 @@
import os
import webrtcvad
import collections
import speech_recognition as sr
from pydub import AudioSegment
from happytransformer import HappyTextToText, TTSettings
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class VADAudio:
    def __init__(self, aggressiveness=3):
        self.vad = webrtcvad.Vad(aggressiveness)
        self.sample_rate = 16000
        self.frame_duration_ms = 30

    def frame_generator(self, audio, frame_duration_ms, sample_rate):
        n = int(sample_rate * (frame_duration_ms / 1000.0))
        offset = 0
        while offset + n < len(audio):
            yield audio[offset:offset + n]
            offset += n

    def vad_collector(self, audio, sample_rate, frame_duration_ms, padding_duration_ms=300, aggressiveness=3):
        vad = webrtcvad.Vad(aggressiveness)
        num_padding_frames = int(padding_duration_ms / frame_duration_ms)
        ring_buffer = collections.deque(maxlen=num_padding_frames)
        triggered = False

        for frame in self.frame_generator(audio, frame_duration_ms, sample_rate):
            is_speech = vad.is_speech(frame, sample_rate)
            if not triggered:
                ring_buffer.append((frame, is_speech))
                num_voiced = len([f for f, speech in ring_buffer if speech])
                if num_voiced > 0.9 * ring_buffer.maxlen:
                    triggered = True
                    for f, s in ring_buffer:
                        yield f
                    ring_buffer.clear()
            else:
                yield frame
                ring_buffer.append((frame, is_speech))
                num_unvoiced = len([f for f, speech in ring_buffer if not speech])
                if num_unvoiced > 0.9 * ring_buffer.maxlen:
                    triggered = False
                    yield b''.join([f for f in ring_buffer])
                    ring_buffer.clear()

def transcribe_with_vad(audio_file):
    try:
        vad_audio = VADAudio()
        audio = AudioSegment.from_file(audio_file)
        audio = audio.set_frame_rate(vad_audio.sample_rate).set_channels(1)
        raw_audio = audio.raw_data

        frames = vad_audio.vad_collector(raw_audio, vad_audio.sample_rate, vad_audio.frame_duration_ms)
        for frame in frames:
            if len(frame) > 0:
                recognizer = sr.Recognizer()
                recognizer.energy_threshold = 300
                recognizer.dynamic_energy_threshold = True
                recognizer.pause_threshold = 0.8

                audio_data = sr.AudioData(frame, vad_audio.sample_rate, audio.sample_width)
                try:
                    text = recognizer.recognize_google(audio_data, language="en-US")
                    if text.strip():
                        print(f"Transcription: {text}")
                        return text
                except sr.UnknownValueError:
                    logger.warning("Google Speech Recognition could not understand the audio")
                except sr.RequestError as e:
                    logger.error(f"Could not request results from Google Speech Recognition service; {e}")
        return ""
    except Exception as e:
        logger.error(f"Error in transcribe_with_vad: {str(e)}")
        return ""

def process_audio_file(audio_file):
    if 'audio' not in audio_file:
        return {"error": "No audio file provided"}, 400

    # Ensure the file has an allowed extension
    allowed_extensions = {'wav', 'mp3', 'ogg', 'webm'}
    if '.' not in audio_file.filename or \
       audio_file.filename.rsplit('.', 1)[1].lower() not in allowed_extensions:
        return {"error": "Invalid audio file format"}, 400

    temp_path = None
    wav_path = None
    try:
        # Save the uploaded file temporarily
        temp_dir = os.path.join(os.getcwd(), 'temp')
        os.makedirs(temp_dir, exist_ok=True)
        temp_path = os.path.join(temp_dir, 'temp_audio.' + audio_file.filename.rsplit('.', 1)[1].lower())

        audio_file.save(temp_path)

        # Convert audio to proper format if needed
        audio = AudioSegment.from_file(temp_path)
        audio = audio.set_channels(1)  # Convert to mono
        audio = audio.set_frame_rate(16000)  # Set sample rate to 16kHz
        audio = audio.normalize()  # Normalize audio levels

        # Save as WAV for speech recognition
        wav_path = os.path.join(temp_dir, 'temp_audio.wav')
        audio.export(wav_path, format="wav", parameters=["-ar", "16000", "-ac", "1"])

        # Try VAD-based transcription first
        text = transcribe_with_vad(wav_path)

        # If VAD fails, try direct recognition
        if not text:
            recognizer = sr.Recognizer()
            recognizer.energy_threshold = 300
            recognizer.dynamic_energy_threshold = True
            recognizer.pause_threshold = 0.8

            with sr.AudioFile(wav_path) as source:
                recognizer.adjust_for_ambient_noise(source, duration=0.5)
                audio_data = recognizer.record(source)
                text = recognizer.recognize_google(audio_data, language="en-US")

        if not text.strip():
            return {"error": "No speech detected. Please try speaking again."}, 400

        # Grammar correction
        try:
            happy_tt = HappyTextToText("T5", "vennify/t5-base-grammar-correction")
            settings = TTSettings(do_sample=True, top_k=50, temperature=0.7)
            corrected_text = happy_tt.generate_text(f"grammar: {text}", args=settings)
            corrected_text = corrected_text.text
        except Exception as e:
            logger.error(f"Grammar correction failed: {str(e)}")
            corrected_text = text  # Fall back to original text

        print(f"Original Transcription: {text}")
        print(f"Corrected Transcription: {corrected_text}")

        return {
            "transcription": corrected_text,
            "original": text
        }

    except sr.UnknownValueError:
        return {"error": "Could not understand audio. Please speak more clearly."}, 400
    except sr.RequestError as e:
        return {"error": f"Speech recognition service error: {str(e)}"}, 500
    except Exception as e:
        logger.error(f"Error processing audio: {str(e)}")
        return {"error": f"Audio processing error: {str(e)}"}, 500
    finally:
        # Ensure temp files are cleaned up even if an error occurs
        if temp_path and os.path.exists(temp_path):
            try:
                os.remove(temp_path)
            except Exception as e:
                logger.error(f"Error removing temp file: {str(e)}")
        if wav_path and os.path.exists(wav_path):
            try:
                os.remove(wav_path)
            except Exception as e:
                logger.error(f"Error removing wav file: {str(e)}")
modules/chatbot_processor.py
ADDED
@@ -0,0 +1,247 @@
from typing import Dict, List, Optional, Union, Tuple
import logging
from .nlp_processor import NLPProcessor
from .location_processor import LocationProcessor
from .property_processor import PropertyProcessor
from .models import fetch_and_cache_properties, CustomRagRetriever

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ChatbotProcessor:
    def __init__(self):
        self.nlp_processor = NLPProcessor()
        self.location_processor = LocationProcessor()
        self.property_processor = PropertyProcessor()
        logger.info("Initialized ChatbotProcessor")

    def process_query(self,
                      query: str,
                      user_location: Optional[Tuple[float, float]] = None,
                      retriever: Optional[CustomRagRetriever] = None) -> List[Dict]:
        """Process user query and return filtered properties"""
        try:
            logger.info(f"Processing query: {query}")

            # Extract numerical values and criteria from query
            criteria = self.nlp_processor.process_query(query)
            logger.info(f"Extracted criteria: {criteria}")

            # Get properties from cache
            properties = fetch_and_cache_properties()
            if not properties:
                logger.error("No properties available in cache")
                return []

            # Process properties
            processed_properties = [
                self.property_processor.process_property_data(p)
                for p in properties
            ]
            logger.info(f"Processed {len(processed_properties)} properties")

            # Apply filters based on extracted criteria
            filtered_properties = processed_properties

            # Filter by BHK if specified
            if criteria.get('bhk'):
                filtered_properties = self.property_processor.filter_by_bhk(
                    filtered_properties, criteria['bhk']
                )
                logger.info(f"Filtered by BHK: {criteria['bhk']}, remaining: {len(filtered_properties)}")

            # Filter by bathrooms if specified
            if criteria.get('bathrooms'):
                filtered_properties = self.property_processor.filter_by_bathrooms(
                    filtered_properties, criteria['bathrooms']
                )
                logger.info(f"Filtered by bathrooms: {criteria['bathrooms']}, remaining: {len(filtered_properties)}")

            # Filter by square footage if specified
            if criteria.get('square_footage'):
                min_sqft, max_sqft = criteria['square_footage']
                filtered_properties = self.property_processor.filter_by_square_footage(
                    filtered_properties, min_sqft, max_sqft
                )
                logger.info(f"Filtered by square footage: {min_sqft}-{max_sqft}, remaining: {len(filtered_properties)}")

            # Filter by market value if specified
            if criteria.get('market_value'):
                min_value, max_value = criteria['market_value']
                filtered_properties = self.property_processor.filter_by_market_value(
                    filtered_properties, min_value, max_value
                )
                logger.info(f"Filtered by market value: {min_value}-{max_value}, remaining: {len(filtered_properties)}")

            # Filter by year built if specified
            if criteria.get('year_built'):
                min_year, max_year = criteria['year_built']
                filtered_properties = self.property_processor.filter_by_year_built(
                    filtered_properties, min_year, max_year
                )
                logger.info(f"Filtered by year built: {min_year}-{max_year}, remaining: {len(filtered_properties)}")

            # Filter by status if specified
            if criteria.get('status'):
                filtered_properties = self.property_processor.filter_by_status(
                    filtered_properties, criteria['status']
                )
                logger.info(f"Filtered by status: {criteria['status']}, remaining: {len(filtered_properties)}")

            # Handle location-based filtering
            if user_location:
                # Calculate distances from user location
                filtered_properties = self.location_processor.calculate_distances(
                    user_location, filtered_properties
                )
                logger.info(f"Calculated distances for {len(filtered_properties)} properties")

                # Filter by distance if specified
                if criteria.get('max_distance'):
                    filtered_properties = self.location_processor.filter_by_location_criteria(
                        filtered_properties,
                        {'max_distance': criteria['max_distance']}
                    )
                    logger.info(f"Filtered by max distance: {criteria['max_distance']}, remaining: {len(filtered_properties)}")

            # Handle landmark-based filtering
            if criteria.get('landmarks'):
                landmark_criteria = {
                    'nearby_landmarks': criteria['landmarks']
                }
                filtered_properties = self.location_processor.filter_by_location_criteria(
                    filtered_properties, landmark_criteria
                )
                logger.info(f"Filtered by landmarks: {criteria['landmarks']}, remaining: {len(filtered_properties)}")

            # Use RAG retriever if available
            if retriever:
                logger.info("Using RAG retriever for semantic search")
                rag_results = retriever.retrieve(query, top_k=5)
                if rag_results:
                    # Merge RAG results with filtered properties
                    rag_properties = [r['property'] for r in rag_results]
                    filtered_properties = [p for p in filtered_properties if p in rag_properties]
                    logger.info(f"After RAG filtering, remaining: {len(filtered_properties)}")

            # Format property details for display
            formatted_properties = []
            for property_data in filtered_properties:
                formatted_property = {
                    'details': self.property_processor.format_property_details(property_data),
                    'data': property_data
                }
                formatted_properties.append(formatted_property)

            logger.info(f"Returning {len(formatted_properties)} formatted properties")
            return formatted_properties

        except Exception as e:
            logger.error(f"Error processing query: {str(e)}")
            return []

    def get_similar_properties(self,
                               reference_property: Dict,
                               properties: List[Dict],
                               top_k: int = 5) -> List[Dict]:
        """Find properties similar to reference property"""
        try:
            logger.info(f"Finding similar properties to: {reference_property.get('PropertyName', 'Unknown')}")

            # Process properties
            processed_properties = [
                self.property_processor.process_property_data(p)
                for p in properties
            ]

            # Find similar properties
            similar_properties = self.property_processor.find_similar_properties(
                reference_property, processed_properties, top_k
            )

            # Format results
            formatted_results = []
            for property_data, similarity in similar_properties:
                formatted_property = {
                    'details': self.property_processor.format_property_details(property_data),
                    'data': property_data,
                    'similarity': similarity
                }
                formatted_results.append(formatted_property)

            logger.info(f"Found {len(formatted_results)} similar properties")
            return formatted_results

        except Exception as e:
            logger.error(f"Error finding similar properties: {str(e)}")
            return []

    def get_nearby_landmarks(self,
                             property_data: Dict,
                             radius_miles: float = 5.0) -> List[Dict]:
        """Get landmarks near a property"""
        try:
            logger.info(f"Finding landmarks near property: {property_data.get('PropertyName', 'Unknown')}")

            # Get property coordinates
            latitude = float(property_data.get('Latitude', 0))
            longitude = float(property_data.get('Longitude', 0))

            if latitude and longitude:
                landmarks = self.location_processor.find_nearby_landmarks(
                    latitude, longitude, radius_miles
                )
                logger.info(f"Found {len(landmarks)} nearby landmarks")
                return landmarks
            return []

        except Exception as e:
            logger.error(f"Error finding nearby landmarks: {str(e)}")
            return []

    def get_location_details(self, property_data: Dict) -> Dict:
        """Get detailed location information for a property"""
        try:
            logger.info(f"Getting location details for property: {property_data.get('PropertyName', 'Unknown')}")

            # Get property coordinates
            latitude = float(property_data.get('Latitude', 0))
            longitude = float(property_data.get('Longitude', 0))

            if latitude and longitude:
                details = self.location_processor.get_location_details(
                    latitude, longitude
                )
                logger.info(f"Location details: {details}")
                return details
            return {}

        except Exception as e:
            logger.error(f"Error getting location details: {str(e)}")
            return {}

    def find_similar_properties(self, query, top_k=5):
        """Find similar properties using the retriever"""
        try:
            # Get properties from retriever
            results = self.retriever.retrieve(query, top_k=top_k)

            # Ensure we have exactly 5 properties
            while len(results) < 5:
                # Add remaining properties with high distance scores
                remaining_idx = len(results)
                properties = fetch_and_cache_properties()
                if remaining_idx < len(properties):
                    property_data = properties[remaining_idx]
                    formatted_property = format_property_details(property_data)
                    if formatted_property:
                        results.append({
                            "property": formatted_property,
                            "distance": 1.0  # High distance score for additional properties
                        })

            return results[:5]  # Return exactly 5 properties
        except Exception as e:
            logger.error(f"Error finding similar properties: {str(e)}")
            return []
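For reference, ChatbotProcessor is driven by the /api/properties/* routes in app.py; invoked on its own it would look roughly like the sketch below. This is an illustrative assumption only: it presumes the nlp/location/property processor modules are importable from the repository root and that the cached property feed and model files are available.

# Hypothetical usage sketch (not part of the uploaded files).
from modules.chatbot_processor import ChatbotProcessor

processor = ChatbotProcessor()

# user_location is an optional (latitude, longitude) tuple; retriever may be omitted.
results = processor.process_query(
    "3 bedroom house under 500000",
    user_location=(40.7128, -74.0060),  # example coordinates, New York City
)
for item in results:
    print(item["details"])  # human-readable summary built by PropertyProcessor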
modules/config.py
ADDED
@@ -0,0 +1,87 @@
import os
from dotenv import load_dotenv
from enum import Enum

# Load environment variables
load_dotenv()

# User Plan Enum
class UserPlan(Enum):
    BASIC = "basic"
    PLUS = "plus"
    PRO = "pro"

# API Keys and Credentials
API_KEY = os.getenv("GOOGLE_API_KEY")
CSE_ID = os.getenv("GOOGLE_CSE_ID")
CLOUDINARY_CLOUD_NAME = os.getenv("CLOUDINARY_CLOUD_NAME")
CLOUDINARY_API_KEY = os.getenv("CLOUDINARY_API_KEY")
CLOUDINARY_API_SECRET = os.getenv("CLOUDINARY_API_SECRET")
NGROK_AUTH_TOKEN = os.getenv("NGROK_AUTH_TOKEN")
SECRET_KEY = os.getenv('SECRET_KEY', 'your-secret-key-here')

# Base directory for the project
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Model Paths - Updated for local deployment
MODEL_DIR = os.getenv("MODEL_DIR", os.path.join(BASE_DIR, "models", "saved_models"))
LLM_MODEL_DIR = os.getenv("LLM_MODEL_DIR", os.path.join(BASE_DIR, "models", "llm"))
FEATURE_MATCHER_DIR = os.path.join(MODEL_DIR, "feature_matcher")
BASE_MODEL_DIR = os.path.join(FEATURE_MATCHER_DIR, "base_model")
SEMANTIC_MODEL_DIR = os.path.join(FEATURE_MATCHER_DIR, "semantic_model")

# Create necessary directories
os.makedirs(MODEL_DIR, exist_ok=True)
os.makedirs(LLM_MODEL_DIR, exist_ok=True)
os.makedirs(FEATURE_MATCHER_DIR, exist_ok=True)
os.makedirs(BASE_MODEL_DIR, exist_ok=True)
os.makedirs(SEMANTIC_MODEL_DIR, exist_ok=True)
os.makedirs(os.path.join(BASE_DIR, "temp"), exist_ok=True)

# Rate Limiting
RATE_LIMIT_WINDOW = int(os.getenv("RATE_LIMIT_WINDOW", 60))
MAX_REQUESTS_PER_WINDOW = int(os.getenv("MAX_REQUESTS_PER_WINDOW", 30))
MAX_QUERY_LENGTH = int(os.getenv("MAX_QUERY_LENGTH", 1000))

# Cache Settings
CACHE_TTL = int(os.getenv("CACHE_TTL", 3600))

# Domain Classifier
DOMAIN_CLASSIFIER_MODEL = os.getenv("DOMAIN_CLASSIFIER_MODEL", "distilbert-base-uncased")

# Plan input limits
PLAN_INPUT_LIMITS = {
    UserPlan.BASIC: int(os.getenv("BASIC_PLAN_LIMIT", 5)),
    UserPlan.PLUS: int(os.getenv("PLUS_PLAN_LIMIT", 10)),
    UserPlan.PRO: int(os.getenv("PRO_PLAN_LIMIT", 20))
}

# Plan-specific fields
PLAN_FIELDS = {
    UserPlan.BASIC: {
        "PropertyName", "Address", "City", "State", "ZipCode",
        "LeasableSquareFeet", "NumberOfRooms", "Beds", "Baths",
        "PropertyStatus", "Description"
    },
    UserPlan.PLUS: {
        # Basic fields plus additional ones
        "PropertyName", "Address", "City", "State", "ZipCode",
        "LeasableSquareFeet", "NumberOfRooms", "Beds", "Baths",
        "PropertyStatus", "Description", "YearBuilt", "MarketValue",
        "PropertyType", "ParkingSpaces", "PropertyManager",
        "TaxAssessmentNumber", "Latitude", "Longitude", "CreateDate",
        "LastModifiedDate", "ViewNumber", "Contact", "TotalSquareFeet"
    },
    UserPlan.PRO: {
        # All fields
        "PropertyName", "Address", "City", "State", "ZipCode",
        "LeasableSquareFeet", "NumberOfRooms", "Beds", "Baths",
        "PropertyStatus", "Description", "YearBuilt", "MarketValue",
        "PropertyType", "ParkingSpaces", "PropertyManager",
        "TaxAssessmentNumber", "Latitude", "Longitude", "CreateDate",
        "LastModifiedDate", "ViewNumber", "Contact", "TotalSquareFeet",
        "AgentName", "AgentPhoneNumber", "AgentEmail", "KeyFeatures",
        "NearbyAmenities", "property_image",
        "Distance", "IsDeleted"
    }
}
modules/input_tracker.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import time
from collections import defaultdict
import logging
import json
import os
from datetime import datetime
from modules.config import PLAN_INPUT_LIMITS, UserPlan

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class UserInputTracker:
    def __init__(self):
        self.input_counts = defaultdict(lambda: {'count': 0, 'last_reset': time.time()})
        self.session_data_file = 'session_data.json'
        self.load_session_data()

    def load_session_data(self):
        """Load session data from file if it exists"""
        try:
            if os.path.exists(self.session_data_file):
                with open(self.session_data_file, 'r') as f:
                    data = json.load(f)
                    for session_id, session_info in data.items():
                        self.input_counts[session_id] = session_info
                print(f"Loaded {len(data)} sessions from file")
        except Exception as e:
            logger.error(f"Error loading session data: {str(e)}")

    def save_session_data(self):
        """Save session data to file"""
        try:
            with open(self.session_data_file, 'w') as f:
                json.dump(dict(self.input_counts), f)
            print("Session data saved successfully")
        except Exception as e:
            logger.error(f"Error saving session data: {str(e)}")

    def can_accept_input(self, session_id, plan):
        """Check if the user can make another query based on their plan"""
        self._check_reset(session_id)
        max_inputs = self._get_max_inputs(plan)
        current_count = self.input_counts[session_id]['count']

        print(f"Session {session_id} - Plan: {plan}, Current count: {current_count}, Max inputs: {max_inputs}")
        return current_count < max_inputs

    def add_input(self, session_id, plan):
        """Add an input to the user's count"""
        self._check_reset(session_id)
        if self.can_accept_input(session_id, plan):
            self.input_counts[session_id]['count'] += 1
            self.save_session_data()
            print(f"Added input for session {session_id}. New count: {self.input_counts[session_id]['count']}")
            return True
        return False

    def get_remaining_inputs(self, session_id, plan):
        """Get the number of remaining inputs for the user"""
        self._check_reset(session_id)
        max_inputs = self._get_max_inputs(plan)
        current_count = self.input_counts[session_id]['count']
        remaining = max(0, max_inputs - current_count)
        print(f"Session {session_id} - Remaining inputs: {remaining}")
        return remaining

    def get_usage_stats(self, session_id):
        """Get usage statistics for a session"""
        try:
            user_data = self.input_counts[session_id]
            current_time = time.time()
            remaining_time = 24 - ((current_time - user_data['last_reset']) / 3600)

            return {
                'total_used': user_data['count'],
                'last_reset': datetime.fromtimestamp(user_data['last_reset']).isoformat(),
                'remaining_time': remaining_time
            }
        except Exception as e:
            logger.error(f"Error in get_usage_stats: {str(e)}")
            return {
                'total_used': 0,
                'last_reset': datetime.fromtimestamp(time.time()).isoformat(),
                'remaining_time': 24
            }

    def _check_reset(self, session_id):
        """Check if the 24-hour period has passed and reset if necessary"""
        current_time = time.time()
        last_reset = self.input_counts[session_id]['last_reset']

        if current_time - last_reset >= 24 * 3600:  # 24 hours in seconds
            self.input_counts[session_id] = {'count': 0, 'last_reset': current_time}
            self.save_session_data()
            print(f"Reset count for session {session_id}")

    def _get_max_inputs(self, plan):
        """Get the maximum number of inputs allowed for a plan"""
        try:
            # If plan is a UserPlan enum, get its value
            if isinstance(plan, UserPlan):
                plan = plan.value

            # Convert plan to lowercase string for comparison
            plan = str(plan).lower()

            plan_limits = {
                'basic': 5,
                'plus': 20,
                'pro': 50
            }
            return plan_limits.get(plan, 5)  # Default to basic plan if unknown
        except Exception as e:
            logger.error(f"Error getting max inputs for plan {plan}: {str(e)}")
            return 5  # Default to basic plan on error
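A minimal usage sketch for the tracker above, assuming a request handler that already knows the caller's session_id and plan string; the handler name and return payload are illustrative only, not part of the uploaded files:

# Hypothetical wiring for UserInputTracker (illustrative names only)
tracker = UserInputTracker()

def handle_query(session_id: str, plan: str, query: str):
    # Reject the request once the daily plan quota is used up
    if not tracker.can_accept_input(session_id, plan):
        return {"error": "Daily limit reached", "remaining": 0}
    tracker.add_input(session_id, plan)
    return {
        "remaining": tracker.get_remaining_inputs(session_id, plan),
        "stats": tracker.get_usage_stats(session_id),
    }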
modules/location.py
ADDED
@@ -0,0 +1,76 @@
import geocoder
from geopy.distance import geodesic
from geopy.geocoders import Nominatim
import logging

def get_nearby_properties(latitude, longitude, df, top_k=5):
    """Get properties near a given location"""
    try:
        my_location = (latitude, longitude)

        # Filter out rows with invalid coordinates
        valid_properties = df[
            df['Latitude'].notna() &
            df['Longitude'].notna() &
            df['Latitude'].apply(lambda x: isinstance(x, (int, float)) or (isinstance(x, str) and x.replace('.', '').isdigit())) &
            df['Longitude'].apply(lambda x: isinstance(x, (int, float)) or (isinstance(x, str) and x.replace('.', '').isdigit()))
        ].copy()

        # Convert coordinates to float
        valid_properties['Latitude'] = valid_properties['Latitude'].astype(float)
        valid_properties['Longitude'] = valid_properties['Longitude'].astype(float)

        # Calculate distances
        valid_properties['Distance'] = valid_properties.apply(
            lambda row: geodesic(my_location, (row['Latitude'], row['Longitude'])).miles,
            axis=1
        )

        # Get nearest properties
        nearest_properties = valid_properties.nsmallest(top_k, 'Distance')
        return nearest_properties

    except Exception as e:
        logging.error(f"Error getting nearby properties: {str(e)}")
        return None

def get_location_details(latitude, longitude):
    """Get location details from coordinates"""
    try:
        geolocator = Nominatim(user_agent="hive_prop")
        location = geolocator.reverse(f"{latitude}, {longitude}", language='en')

        if location and location.raw.get('address'):
            address = location.raw['address']
            city = address.get('city') or address.get('town') or address.get('suburb') or address.get('county')
            state = address.get('state')
            country = address.get('country')

            return {
                'city': city,
                'state': state,
                'country': country
            }
        else:
            return None

    except Exception as e:
        logging.error(f"Error getting location details: {str(e)}")
        return None

def set_location(latitude, longitude, session_id, conversation_context):
    """Set location for a session"""
    try:
        location_details = get_location_details(latitude, longitude)
        if location_details:
            conversation_context[session_id] = {
                'location': (latitude, longitude),
                'city': location_details['city'],
                'state': location_details['state'],
                'country': location_details['country']
            }
            return True, location_details
        return False, None
    except Exception as e:
        logging.error(f"Error setting location: {str(e)}")
        return False, None
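A brief sketch of how these DataFrame-based helpers might be called; the sample coordinates, column values, and context dict are made up for illustration, and the reverse-geocoding call needs network access to Nominatim:

# Hypothetical call site for modules/location.py (sample data only)
import pandas as pd

df = pd.DataFrame({
    "PropertyName": ["A", "B"],
    "Latitude": [17.3850, 17.4401],
    "Longitude": [78.4867, 78.3489],
})
nearest = get_nearby_properties(17.39, 78.49, df, top_k=1)

conversation_context = {}
ok, details = set_location(17.39, 78.49, "session-123", conversation_context)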
modules/location_processor.py
ADDED
@@ -0,0 +1,346 @@
import numpy as np
from geopy.distance import geodesic
from geopy.geocoders import Nominatim
from typing import Dict, List, Tuple, Optional
import logging
from sentence_transformers import SentenceTransformer
import torch
import math
from modules.models import get_cached_properties

class LocationProcessor:
    def __init__(self):
        self.geolocator = Nominatim(user_agent="real_estate_app")
        self.sentence_transformer = SentenceTransformer('all-MiniLM-L6-v2')

    def get_location_details(self, latitude: float, longitude: float) -> Dict:
        """Get detailed location information from coordinates"""
        try:
            location = self.geolocator.reverse(f"{latitude}, {longitude}", language='en')
            if location and location.raw.get('address'):
                address = location.raw['address']
                return {
                    'city': address.get('city') or address.get('town') or address.get('suburb'),
                    'state': address.get('state'),
                    'country': address.get('country'),
                    'postcode': address.get('postcode'),
                    'road': address.get('road'),
                    'neighbourhood': address.get('neighbourhood'),
                    'suburb': address.get('suburb')
                }
        except Exception as e:
            logging.error(f"Error getting location details: {str(e)}")
        return {}

    def calculate_distance(self, lat1: float, lon1: float, lat2: float, lon2: float) -> float:
        """Calculate distance between two points using Haversine formula"""
        try:
            # Convert latitude and longitude from degrees to radians
            lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])

            # Haversine formula
            dlat = lat2 - lat1
            dlon = lon2 - lon1
            a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
            c = 2 * math.asin(math.sqrt(a))
            r = 6371  # Radius of earth in kilometers

            return c * r
        except Exception as e:
            logging.error(f"Error calculating distance: {str(e)}")
            return float('inf')

    def find_nearby_properties(self, latitude: float, longitude: float, radius_km: float = 10.0) -> List[Dict]:
        """Find properties within specified radius of given coordinates"""
        print(f"\n=== Finding nearby properties ===")
        print(f"Searching within {radius_km}km of coordinates: {latitude}, {longitude}")

        try:
            properties = get_cached_properties()
            if not properties:
                print("No properties available in cache")
                return []

            nearby_properties = []
            for prop in properties:
                try:
                    # Get property location - handle both location object and direct lat/lon fields
                    prop_lat = None
                    prop_lon = None

                    # Try to get location from location object
                    location = prop.get('location', {})
                    if location and isinstance(location, dict):
                        prop_lat = location.get('latitude')
                        prop_lon = location.get('longitude')

                    # If not found in location object, try direct fields
                    if prop_lat is None or prop_lon is None:
                        prop_lat = prop.get('Latitude')
                        prop_lon = prop.get('Longitude')

                    # Skip if coordinates are missing or invalid
                    if not prop_lat or not prop_lon:
                        continue

                    try:
                        prop_lat = float(prop_lat)
                        prop_lon = float(prop_lon)
                    except (ValueError, TypeError):
                        continue

                    # Skip if coordinates are zero or invalid
                    if prop_lat == 0 or prop_lon == 0:
                        continue

                    # Calculate distance
                    distance = self.calculate_distance(latitude, longitude, prop_lat, prop_lon)

                    # Add distance to property data
                    prop['Distance'] = round(distance, 2)

                    # Check if property is within radius
                    if distance <= radius_km:
                        print(f"Found nearby property: {prop.get('propertyName', 'Unnamed Property')} at {distance}km")
                        nearby_properties.append(prop)

                except Exception as e:
                    print(f"Error processing property: {str(e)}")
                    continue

            # Sort by distance
            nearby_properties.sort(key=lambda x: x.get('Distance', float('inf')))
            print(f"Found {len(nearby_properties)} properties within {radius_km}km")
            return nearby_properties

        except Exception as e:
            print(f"Error finding nearby properties: {str(e)}")
            return []

    def set_location(self, latitude: float, longitude: float, session_id: str) -> Dict:
        """Set user location and find nearby properties"""
        print(f"\n=== Setting location ===")
        print(f"Latitude: {latitude}")
        print(f"Longitude: {longitude}")
        print(f"Session ID: {session_id}")

        try:
            # Get location details
            location_details = self.get_location_details(latitude, longitude)
            print(f"Location details: {location_details}")

            # Find nearby properties
            nearby_properties = self.find_nearby_properties(latitude, longitude)

            # Format the response
            response = {
                "status": "success",
                "message": f"Found {len(nearby_properties)} properties nearby",
                "location": location_details,
                "properties": nearby_properties
            }

            # Add more detailed information if properties were found
            if nearby_properties:
                response["nearest_property"] = {
                    "name": nearby_properties[0].get('propertyName', 'Unnamed Property'),
                    "distance": nearby_properties[0].get('Distance', 0),
                    "address": nearby_properties[0].get('Address', 'No address available')
                }

            return response

        except Exception as e:
            print(f"Error in set_location: {str(e)}")
            return {
                "status": "error",
                "message": "Error processing location",
                "properties": []
            }

    def calculate_distances(self,
                            reference_point: Tuple[float, float],
                            properties: List[Dict]) -> List[Dict]:
        """Calculate distances between reference point and properties"""
        distances = []
        for property_data in properties:
            try:
                prop_lat = float(property_data.get('Latitude', 0))
                prop_lon = float(property_data.get('Longitude', 0))

                if prop_lat and prop_lon:
                    distance = geodesic(reference_point, (prop_lat, prop_lon)).miles
                    property_data['Distance'] = round(distance, 2)
                    distances.append(property_data)
            except (ValueError, TypeError) as e:
                logging.error(f"Error calculating distance: {str(e)}")
                continue

        return sorted(distances, key=lambda x: x.get('Distance', float('inf')))

    def find_nearby_landmarks(self,
                              latitude: float,
                              longitude: float,
                              radius_miles: float = 5.0) -> List[Dict]:
        """Find landmarks near a given location"""
        try:
            # Use Nominatim to search for nearby places
            query = f"amenity near {latitude}, {longitude}"
            places = self.geolocator.geocode(query, exactly_one=False, limit=10)

            landmarks = []
            if places:
                for place in places:
                    try:
                        place_lat = float(place.raw.get('lat', 0))
                        place_lon = float(place.raw.get('lon', 0))

                        if place_lat and place_lon:
                            distance = geodesic((latitude, longitude),
                                                (place_lat, place_lon)).miles

                            if distance <= radius_miles:
                                landmarks.append({
                                    'name': place.raw.get('display_name', 'Unknown'),
                                    'type': place.raw.get('type', 'Unknown'),
                                    'distance': round(distance, 2)
                                })
                    except (ValueError, TypeError):
                        continue

            return sorted(landmarks, key=lambda x: x['distance'])

        except Exception as e:
            logging.error(f"Error finding nearby landmarks: {str(e)}")
            return []

    def filter_by_location_criteria(self,
                                    properties: List[Dict],
                                    criteria: Dict) -> List[Dict]:
        """Filter properties based on location criteria"""
        filtered_properties = []

        for property_data in properties:
            try:
                # Check if property meets all criteria
                meets_criteria = True

                # Check distance if specified
                if 'max_distance' in criteria:
                    if property_data.get('Distance', float('inf')) > criteria['max_distance']:
                        meets_criteria = False

                # Check landmarks if specified
                if 'nearby_landmarks' in criteria:
                    property_landmarks = self.find_nearby_landmarks(
                        float(property_data.get('Latitude', 0)),
                        float(property_data.get('Longitude', 0))
                    )
                    landmark_names = [l['name'].lower() for l in property_landmarks]
                    if not any(landmark.lower() in landmark_names
                               for landmark in criteria['nearby_landmarks']):
                        meets_criteria = False

                if meets_criteria:
                    filtered_properties.append(property_data)

            except Exception as e:
                logging.error(f"Error filtering property: {str(e)}")
                continue

        return filtered_properties

    def get_location_embedding(self, location_text: str) -> np.ndarray:
        """Get embedding for location text"""
        return self.sentence_transformer.encode(location_text)

    def find_similar_locations(self,
                               reference_location: str,
                               candidate_locations: List[str],
                               top_k: int = 5) -> List[Tuple[str, float]]:
        """Find locations similar to reference location"""
        ref_embedding = self.get_location_embedding(reference_location)
        candidate_embeddings = self.sentence_transformer.encode(candidate_locations)

        similarities = []
        for location, embedding in zip(candidate_locations, candidate_embeddings):
            similarity = np.dot(ref_embedding, embedding) / (
                np.linalg.norm(ref_embedding) * np.linalg.norm(embedding)
            )
            similarities.append((location, float(similarity)))

        return sorted(similarities, key=lambda x: x[1], reverse=True)[:top_k]

def calculate_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Module-level Haversine helper (mirrors LocationProcessor.calculate_distance),
    used by the standalone find_nearby_properties below."""
    try:
        lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
        dlat = lat2 - lat1
        dlon = lon2 - lon1
        a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
        c = 2 * math.asin(math.sqrt(a))
        return c * 6371  # Radius of earth in kilometers
    except Exception as e:
        logging.error(f"Error calculating distance: {str(e)}")
        return float('inf')

def find_nearby_properties(latitude: float, longitude: float, radius_km: float = 5.0) -> List[Dict]:
    """Find properties within specified radius of given coordinates"""
    print(f"\n=== Finding nearby properties ===")
    print(f"Searching within {radius_km}km of coordinates: {latitude}, {longitude}")

    try:
        properties = get_cached_properties()
        if not properties:
            print("No properties available in cache")
            return []

        nearby_properties = []
        for prop in properties:
            try:
                # Get property location
                location = prop.get('location', {})
                if not location:
                    continue

                prop_lat = float(location.get('latitude', 0))
                prop_lon = float(location.get('longitude', 0))

                if prop_lat == 0 or prop_lon == 0:
                    continue

                # Calculate distance
                distance = calculate_distance(latitude, longitude, prop_lat, prop_lon)

                # Add distance to property data
                prop['Distance'] = round(distance, 2)

                # Check if property is within radius
                if distance <= radius_km:
                    print(f"Found nearby property: {prop.get('propertyName')} at {distance}km")
                    nearby_properties.append(prop)

            except Exception as e:
                print(f"Error processing property: {str(e)}")
                continue

        # Sort by distance
        nearby_properties.sort(key=lambda x: x.get('Distance', float('inf')))
        print(f"Found {len(nearby_properties)} properties within {radius_km}km")
        return nearby_properties

    except Exception as e:
        print(f"Error finding nearby properties: {str(e)}")
        return []

def set_location(latitude: float, longitude: float, session_id: str) -> Dict:
    """Set user location and find nearby properties"""
    print(f"\n=== Setting location ===")
    print(f"Latitude: {latitude}")
    print(f"Longitude: {longitude}")
    print(f"Session ID: {session_id}")

    try:
        # Find nearby properties
        nearby_properties = find_nearby_properties(latitude, longitude)

        return {
            "status": "success",
            "message": f"Found {len(nearby_properties)} properties nearby",
            "properties": nearby_properties
        }

    except Exception as e:
        print(f"Error in set_location: {str(e)}")
        return {
            "status": "error",
            "message": "Error processing location",
            "properties": []
        }
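A short sketch of the class-based entry point, assuming the property cache behind get_cached_properties is reachable; the coordinates and session id are placeholders:

# Hypothetical usage of LocationProcessor (placeholder coordinates)
processor = LocationProcessor()
result = processor.set_location(17.3850, 78.4867, session_id="session-123")
print(result["message"])
for prop in result["properties"][:3]:
    # Distance is attached in kilometres by find_nearby_properties
    print(prop.get("propertyName"), prop.get("Distance"), "km")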
modules/models.py
ADDED
@@ -0,0 +1,703 @@
import torch
import faiss
import pandas as pd
import requests
import json
import urllib3
from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer, AutoModelForCausalLM
from modules.config import (
    MODEL_DIR,
    LLM_MODEL_DIR,
    FEATURE_MATCHER_DIR,
    BASE_MODEL_DIR,
    SEMANTIC_MODEL_DIR
)
from modules.parallel import ModelParallelizer, parallel_map, batch_process, get_device
import os
import pickle
import numpy as np
import logging
from sklearn.decomposition import PCA
from pathlib import Path
from typing import Dict, Any, Tuple, List

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Disable SSL warnings for development
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Check device
device = get_device()
print(f"Using device: {device}")

# Global variables
model_embedding = None
model_parallelizer = None
properties_cache = None
property_embeddings = None

# API Configuration
API_BASE_URL = os.getenv("API_BASE_URL", "https://99e3-171-78-177-251.ngrok-free.app/api/Property")
API_ENDPOINT = f"{API_BASE_URL}/allPropertieswithfulldetails"

class SearchSystem:
    def __init__(self, model_path: str = None, index_path: str = None, pca_path: str = None):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model_path = Path(model_path) if model_path else Path("models/saved_models/search")
        self.index_path = Path(index_path) if index_path else self.model_path / "faiss_index.bin"
        self.pca_path = Path(pca_path) if pca_path else self.model_path / "pca_model.pkl"

        self.model = None
        self.index = None
        self.pca = None
        self.dimension = 384  # Default dimension for all-MiniLM-L6-v2

    def load_models(self):
        """Load all models and index"""
        try:
            # Load base model
            self.model = SentenceTransformer('all-MiniLM-L6-v2').to(self.device)

            # Load FAISS index
            if self.index_path.exists():
                self.index = faiss.read_index(str(self.index_path))
            else:
                logger.warning(f"FAISS index not found at {self.index_path}")

            # Load PCA model
            if self.pca_path.exists():
                with open(self.pca_path, 'rb') as f:
                    self.pca = pickle.load(f)
            else:
                logger.warning(f"PCA model not found at {self.pca_path}")

            logger.info("Successfully loaded all models")

        except Exception as e:
            logger.error(f"Error loading models: {str(e)}")
            raise

    def train_pca(self, embeddings: np.ndarray, n_components: int = 128):
        """Train PCA model on embeddings"""
        try:
            self.pca = PCA(n_components=n_components)
            self.pca.fit(embeddings)

            # Save PCA model
            self.model_path.mkdir(parents=True, exist_ok=True)
            with open(self.pca_path, 'wb') as f:
                pickle.dump(self.pca, f)

            logger.info(f"Successfully trained and saved PCA model with {n_components} components")

        except Exception as e:
            logger.error(f"Error training PCA: {str(e)}")
            raise

    def transform_embeddings(self, embeddings: np.ndarray) -> np.ndarray:
        """Transform embeddings using PCA if available"""
        if self.pca is not None:
            return self.pca.transform(embeddings)
        return embeddings

    def create_embeddings(self, texts: List[str]) -> np.ndarray:
        """Create embeddings for texts"""
        embeddings = self.model.encode(texts, convert_to_numpy=True)
        return self.transform_embeddings(embeddings)

    def search(self, query: str, k: int = 5) -> Tuple[np.ndarray, np.ndarray]:
        """Search for similar texts"""
        if self.index is None:
            raise ValueError("FAISS index not loaded")

        # Create query embedding
        query_embedding = self.create_embeddings([query])[0]

        # Search
        distances, indices = self.index.search(query_embedding.reshape(1, -1), k)
        return distances[0], indices[0]

    def save_index(self, embeddings: np.ndarray):
        """Create and save FAISS index"""
        try:
            # Create index
            dimension = embeddings.shape[1]
            self.index = faiss.IndexFlatL2(dimension)
            self.index.add(embeddings.astype('float32'))

            # Save index
            self.model_path.mkdir(parents=True, exist_ok=True)
            faiss.write_index(self.index, str(self.index_path))

            logger.info(f"Successfully created and saved FAISS index with dimension {dimension}")

        except Exception as e:
            logger.error(f"Error creating FAISS index: {str(e)}")
            raise

def fetch_and_cache_properties():
    """Fetch properties from API and cache them"""
    global properties_cache
    try:
        print("Fetching properties from API...")

        # Configure session with retry mechanism
        session = requests.Session()
        session.verify = False

        # Add headers for better API communication
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'ngrok-skip-browser-warning': 'true'
        }

        # Make the API request with increased page size
        response = session.get(
            API_ENDPOINT,
            params={"pageNumber": 1, "pageSize": 500},  # Increased page size to ensure we get enough properties
            headers=headers
        )

        # Check for successful response
        response.raise_for_status()

        # Parse and validate response
        data = response.json()
        if not isinstance(data, dict) or 'data' not in data:
            raise ValueError("Invalid API response format")

        properties_cache = data["data"]
        if not properties_cache:
            raise ValueError("No properties found in API response")

        print(f"Successfully cached {len(properties_cache)} properties")
        print("\nSample Raw API Response (First Property):")
        print(json.dumps(properties_cache[0], indent=2))
        return properties_cache

    except Exception as e:
        logger.error(f"Error fetching properties: {str(e)}")
        return []

def get_cached_properties():
    """Get cached properties or fetch if not available"""
    global properties_cache
    if properties_cache is None:
        properties_cache = fetch_and_cache_properties()
    return properties_cache or []

def create_property_embeddings(properties, model):
    """Create embeddings for properties and store in FAISS index"""
    global property_embeddings
    try:
        print("\n=== Creating property embeddings ===")

        # Prepare property texts for embedding
        property_texts = []
        for prop in properties:
            # Create a rich text representation of the property
            text = f"""
            Property Name: {prop.get('propertyName', 'N/A')}
            Type: {prop.get('typeName', 'N/A')}
            Description: {prop.get('description', 'N/A')}
            Address: {prop.get('address', 'N/A')}

            Location Information:
            Full Address: {prop.get('location', {}).get('address', 'N/A')}
            Latitude: {prop.get('location', {}).get('latitude', 'N/A')}
            Longitude: {prop.get('location', {}).get('longitude', 'N/A')}

            Property Details:
            Total Square Feet: {prop.get('totalSquareFeet', 'N/A')}
            Number of Rooms: {prop.get('numberOfRooms', 'N/A')}
            Market Value: {prop.get('marketValue', 'N/A')}

            Features: {', '.join(prop.get('features', []))}

            PG Property Details:
            {format_pg_details(prop.get('pgPropertyDetails', {}))}

            Commercial Property Details:
            {format_commercial_details(prop.get('commercialPropertyDetails', {}))}
            """
            property_texts.append(text)

        print(f"Created text representations for {len(property_texts)} properties")

        # Create embeddings in batches
        embeddings = []
        batch_size = 32
        for i in range(0, len(property_texts), batch_size):
            batch = property_texts[i:i + batch_size]
            print(f"Processing batch {i//batch_size + 1}/{(len(property_texts) + batch_size - 1)//batch_size}")
            batch_embeddings = model.encode(batch, convert_to_numpy=True)
            embeddings.extend(batch_embeddings)

        property_embeddings = np.array(embeddings).astype('float32')
        print(f"Created embeddings for {len(property_embeddings)} properties")

        # Create and save FAISS index
        dimension = property_embeddings.shape[1]
        print(f"Creating FAISS index with dimension {dimension}")
        index = faiss.IndexFlatL2(dimension)
        index.add(property_embeddings)

        # Save the index
        index_path = os.path.join(MODEL_DIR, "property_index.faiss")
        faiss.write_index(index, index_path)
        print(f"Saved FAISS index to {index_path}")

        return index

    except Exception as e:
        print(f"Error creating property embeddings: {str(e)}")
        raise

def format_pg_details(pg_details):
    """Format PG property details into text"""
    if not pg_details:
        return "N/A"

    return f"""
    Deposit: {pg_details.get('depositAmount', 'N/A')}
    Food Included: {pg_details.get('foodIncluded', 'N/A')}
    Food Type: {pg_details.get('foodAvailability', 'N/A')}
    WiFi: {pg_details.get('wifiAvailable', 'N/A')}
    AC: {pg_details.get('isACAvailable', 'N/A')}
    Parking: {pg_details.get('isParkingAvailable', 'N/A')}
    Power Backup: {pg_details.get('powerBackup', 'N/A')}
    Available For: {pg_details.get('availableFor', 'N/A')}
    Total Beds: {pg_details.get('totalBeds', 'N/A')}
    """

def format_commercial_details(commercial_details):
    """Format commercial property details into text"""
    if not commercial_details:
        return "N/A"

    return f"""
    Washrooms: {commercial_details.get('washrooms', 'N/A')}
    Floor Details: {commercial_details.get('floorDetails', 'N/A')}
    Parking: {commercial_details.get('hasParking', 'N/A')}
    Parking Capacity: {commercial_details.get('parkingCapacity', 'N/A')}
    Facing: {commercial_details.get('facing', 'N/A')}
    Lift: {commercial_details.get('hasLift', 'N/A')}
    Furnished: {commercial_details.get('isFurnished', 'N/A')}
    """

def load_sentence_transformer():
    global model_embedding, model_parallelizer
    print("\n=== Loading SentenceTransformer model ===")
    try:
        # Load base model first
        base_model_path = os.path.join(BASE_MODEL_DIR, "model.safetensors")
        print(f"Loading base model from: {base_model_path}")

        # Load semantic model
        semantic_model_path = os.path.join(SEMANTIC_MODEL_DIR, "model.safetensors")
        print(f"Loading semantic model from: {semantic_model_path}")

        # Initialize with default model first
        model_embedding = SentenceTransformer("jinaai/jina-embeddings-v3", trust_remote_code=True).to(device)
        print("Loaded default model successfully")

        # Try to load base model if exists
        if os.path.exists(base_model_path):
            print("Loading base model state dict...")
            try:
                state_dict = torch.load(base_model_path, map_location=device)
                model_embedding.load_state_dict(state_dict)
                print("Base model loaded successfully")
            except Exception as e:
                print(f"Error loading base model: {str(e)}")
        else:
            print(f"Base model not found at {base_model_path}")

        # Try to load semantic model if exists
        if os.path.exists(semantic_model_path):
            print("Loading semantic model state dict...")
            try:
                semantic_state_dict = torch.load(semantic_model_path, map_location=device)
                model_embedding.load_state_dict(semantic_state_dict, strict=False)
                print("Semantic model loaded successfully")
            except Exception as e:
                print(f"Error loading semantic model: {str(e)}")
        else:
            print(f"Semantic model not found at {semantic_model_path}")

        # Initialize parallelizer
        model_parallelizer = ModelParallelizer(model_embedding)
        print("Model parallelizer initialized")
        return model_embedding
    except Exception as e:
        print(f"Error loading model: {str(e)}")
        raise

def load_faiss_index():
    print("\n=== Loading FAISS index ===")
    try:
        index_path = os.path.join(MODEL_DIR, "property_index.faiss")
        print(f"Looking for FAISS index at: {index_path}")

        if os.path.exists(index_path):
            index = faiss.read_index(index_path)
            print("FAISS index loaded successfully")
            return index
        else:
            print("FAISS index not found, creating new index...")
            # Fetch properties and create new index
            properties = get_cached_properties()
            if not properties:
                raise ValueError("No properties available to create index")
            model = load_sentence_transformer()
            return create_property_embeddings(properties, model)
    except Exception as e:
        print(f"Error loading FAISS index: {str(e)}")
        raise

def load_pca_model():
    print("Loading PCA model...")
    try:
        pca_path = os.path.join(MODEL_DIR, "pca_model.pkl")
        if os.path.exists(pca_path):
            with open(pca_path, 'rb') as f:
                pca = pickle.load(f)
            print("PCA model loaded successfully.")
            return pca
        return None
    except Exception as e:
        logger.error(f"Error loading PCA model: {str(e)}")
        return None

def load_search_system(model_path: str = None, index_path: str = None, pca_path: str = None) -> SearchSystem:
    """Load the entire search system"""
    system = SearchSystem(model_path, index_path, pca_path)
    system.load_models()
    return system

def format_property_details(property_data):
    """Format property details with all available information"""
    try:
        # Extract location details safely
        location = property_data.get('location', {})
        address_parts = location.get('address', '').split(',') if location.get('address') else []

        # Ensure we have enough parts for address parsing
        while len(address_parts) < 4:
            address_parts.append('N/A')

        formatted_property = {
            # Basic Information
            "PropertyName": property_data.get('propertyName', 'Unknown'),
            "Address": property_data.get('address', 'N/A'),
            "ZipCode": address_parts[-1].strip() if address_parts else 'N/A',
            "LeasableSquareFeet": float(property_data.get('totalSquareFeet', 0)),
            "YearBuilt": property_data.get('yearBuilt', None),
            "NumberOfRooms": int(property_data.get('numberOfRooms', 0)),
            "ParkingSpaces": int(property_data.get('commercialPropertyDetails', {}).get('parkingCapacity', 0)) if property_data.get('commercialPropertyDetails') else 0,
            "PropertyManager": property_data.get('agents', [{}])[0].get('name', 'N/A') if property_data.get('agents') else 'N/A',
            "MarketValue": float(property_data.get('marketValue', 0)),
            "TaxAssessmentNumber": None,  # Not available in API
            "Latitude": float(location.get('latitude', 0)) if location.get('latitude') is not None else 0.0,
            "Longitude": float(location.get('longitude', 0)) if location.get('longitude') is not None else 0.0,
            "CreateDate": property_data.get('date', 'N/A'),
            "LastModifiedDate": property_data.get('date', 'N/A'),
            "City": address_parts[1].strip() if len(address_parts) > 1 else 'N/A',
            "State": address_parts[2].strip() if len(address_parts) > 2 else 'N/A',
            "Country": address_parts[3].strip() if len(address_parts) > 3 else 'N/A',
            "PropertyType": property_data.get('typeName', 'N/A'),
            "PropertyStatus": property_data.get('parentCategoryName', 'N/A'),
            "Description": property_data.get('description', 'N/A'),
            "ViewNumber": 0,  # Not available in API
            "Contact": property_data.get('agents', [{}])[0].get('phoneNumber', 'N/A') if property_data.get('agents') else 'N/A',
            "TotalSquareFeet": float(property_data.get('totalSquareFeet', 0)),
            "IsDeleted": False,  # Not available in API
            "Beds": int(property_data.get('beds', 0)),  # Updated to use beds instead of numberOfRooms
            "Baths": int(property_data.get('baths', 0)),  # Updated to use baths directly
            "AgentName": property_data.get('agents', [{}])[0].get('name', 'N/A') if property_data.get('agents') else 'N/A',
            "AgentPhoneNumber": property_data.get('agents', [{}])[0].get('phoneNumber', 'N/A') if property_data.get('agents') else 'N/A',
            "AgentEmail": property_data.get('agents', [{}])[0].get('email', 'N/A') if property_data.get('agents') else 'N/A',
            "KeyFeatures": ', '.join(property_data.get('features', [])) if property_data.get('features') else 'N/A',
            "NearbyAmenities": property_data.get('description', 'N/A'),
            "propertyImages": property_data.get('propertyImages', []),

            # PG Property Details
            "PGDetails": {
                "DepositAmount": property_data.get('pgPropertyDetails', {}).get('depositAmount', 'N/A'),
                "FoodIncluded": property_data.get('pgPropertyDetails', {}).get('foodIncluded', 'N/A'),
                "FoodType": property_data.get('pgPropertyDetails', {}).get('foodAvailability', 'N/A'),
                "WifiAvailable": property_data.get('pgPropertyDetails', {}).get('wifiAvailable', 'N/A'),
                "ACAvailable": property_data.get('pgPropertyDetails', {}).get('isACAvailable', 'N/A'),
                "ParkingAvailable": property_data.get('pgPropertyDetails', {}).get('isParkingAvailable', 'N/A'),
                "PowerBackup": property_data.get('pgPropertyDetails', {}).get('powerBackup', 'N/A'),
                "AvailableFor": property_data.get('pgPropertyDetails', {}).get('availableFor', 'N/A'),
                "TotalBeds": property_data.get('pgPropertyDetails', {}).get('totalBeds', 'N/A'),
                "OperatingSince": property_data.get('pgPropertyDetails', {}).get('operatingSince', 'N/A'),
                "NoticePeriod": property_data.get('pgPropertyDetails', {}).get('noticePeriod', 'N/A'),
                "PreferredTenants": property_data.get('pgPropertyDetails', {}).get('preferredTenants', 'N/A')
            } if property_data.get('pgPropertyDetails') else None,

            # Commercial Property Details
            "CommercialDetails": {
                "Washrooms": property_data.get('commercialPropertyDetails', {}).get('washrooms', 'N/A'),
                "FloorDetails": property_data.get('commercialPropertyDetails', {}).get('floorDetails', 'N/A'),
                "HasParking": property_data.get('commercialPropertyDetails', {}).get('hasParking', 'N/A'),
                "ParkingCapacity": property_data.get('commercialPropertyDetails', {}).get('parkingCapacity', 'N/A'),
                "Facing": property_data.get('commercialPropertyDetails', {}).get('facing', 'N/A'),
                "HasLift": property_data.get('commercialPropertyDetails', {}).get('hasLift', 'N/A'),
                "IsFurnished": property_data.get('commercialPropertyDetails', {}).get('isFurnished', 'N/A'),
                "Overlooking": property_data.get('commercialPropertyDetails', {}).get('overlooking', 'N/A'),
                "MonthlyRent": property_data.get('commercialPropertyDetails', {}).get('monthlyRent', 'N/A'),
                "LeaseTerms": property_data.get('commercialPropertyDetails', {}).get('leaseTerms', 'N/A')
            } if property_data.get('commercialPropertyDetails') else None
        }
        return formatted_property
    except Exception as e:
        logger.error(f"Error formatting property details: {str(e)}")
        return None

class CustomRagRetriever:
    def __init__(self, faiss_index, model, pca=None):
        self.index = faiss_index
        self.model = model
        self.pca = pca
        self.dimension = faiss_index.d
        self.model_parallelizer = ModelParallelizer(model)
        print(f"Initialized CustomRagRetriever with dimension {self.dimension}")

    def retrieve(self, query, top_k=10, distance_threshold=1.0):  # Added distance_threshold parameter
        print(f"\n=== RETRIEVAL DEBUG ===")
        print(f"Query: {query}")
        try:
            # Get query embedding
            query_embedding = self.model_parallelizer.parallel_encode([query])[0]
            query_embedding = query_embedding.astype(np.float32)

            # Reshape query embedding to 2D array (1, embedding_dim)
            query_embedding = query_embedding.reshape(1, -1)

            # Apply PCA if it exists
            if self.pca is not None:
                query_embedding = self.pca.transform(query_embedding)

            # Get cached properties first to ensure we have data
            properties = get_cached_properties()
            if not properties:
                logger.error("No properties available in cache")
                return []

            print(f"\nTotal properties in cache: {len(properties)}")

            # Adjust top_k if it's larger than available properties
            top_k = min(top_k, len(properties))

            try:
                # Search for top_k results in the FAISS index
                distances, indices = self.index.search(query_embedding, top_k)
                print(f"\nFAISS Search Results:")
                print(f"Indices: {indices[0]}")
                print(f"Distances: {distances[0]}")
            except Exception as e:
                logger.error(f"Error in FAISS search: {str(e)}")
                print(f"\nFalling back to text search due to FAISS error: {str(e)}")
                return self._fallback_search(query, properties, top_k)

            # Process retrieved properties with enhanced scoring
            retrieved_properties = []
            seen_properties = set()
            print("\nProcessing retrieved properties:")

            for idx, dist in zip(indices[0], distances[0]):
                if idx >= len(properties):
                    continue

                property_data = properties[idx]
                if not property_data:
                    continue

                property_id = property_data.get('id', str(property_data))
                if property_id in seen_properties:
                    continue
                seen_properties.add(property_id)

                if dist > distance_threshold:
                    continue

                # Format property data with all details
                formatted_property = format_property_details(property_data)
                if not formatted_property:
                    continue

                # Calculate semantic similarity
                property_text = f"""
                Property: {formatted_property['PropertyName']}
                Type: {formatted_property['PropertyType']}
                Description: {formatted_property['Description']}
                Features: {formatted_property['KeyFeatures']}
                """

                semantic_score = util.pytorch_cos_sim(
                    self.model_parallelizer.parallel_encode([query])[0],
                    self.model_parallelizer.parallel_encode([property_text])[0]
                ).item()

                # Combine scores (60% distance, 40% semantic)
                combined_score = (1 - dist) * 0.6 + semantic_score * 0.4

                print(f"\nProperty {idx}:")
                print(f"Name: {formatted_property['PropertyName']}")
                print(f"Type: {formatted_property['PropertyType']}")
                print(f"Distance: {dist}")
                print(f"Semantic Score: {semantic_score}")
                print(f"Combined Score: {combined_score}")

                retrieved_properties.append({
                    "property": formatted_property,
                    "distance": float(dist),
                    "semantic_score": semantic_score,
                    "combined_score": combined_score,
                    "match_quality": "perfect" if combined_score > 0.7 else "partial",
                    "confidence": combined_score
                })

            # Sort by combined score
            retrieved_properties.sort(key=lambda x: x['combined_score'], reverse=True)

            # If we don't have enough results, add more properties
            if len(retrieved_properties) < top_k:
                print(f"\nAdding {top_k - len(retrieved_properties)} more properties to reach {top_k}")
                remaining_properties = self._get_remaining_properties(
                    properties, retrieved_properties, top_k
                )
                retrieved_properties.extend(remaining_properties)

            print(f"\nFinal number of properties retrieved: {len(retrieved_properties)}")
            print("=== END RETRIEVAL DEBUG ===\n")

            return retrieved_properties[:top_k]  # Return exactly top_k properties

        except Exception as e:
            logger.error(f"Error in retrieve: {str(e)}")
            print(f"\nError in retrieve: {str(e)}")
            # Return empty list instead of raising to prevent 500 errors
            return []

    def _fallback_search(self, query, properties, top_k):
        """Fallback search method using simple text matching with enhanced scoring"""
        print("\n=== FALLBACK SEARCH DEBUG ===")
        print(f"Query: {query}")
        print(f"Total properties to search: {len(properties)}")
        try:
            query_terms = query.lower().split()
            print(f"Query terms: {query_terms}")
            scored_properties = []

            for prop in properties:
                score = 0
                # Check property name
                if prop.get('propertyName'):
                    name_terms = prop['propertyName'].lower().split()
                    name_matches = sum(1 for term in query_terms if any(term in name_term for name_term in name_terms))
                    score += name_matches * 2  # Give more weight to name matches
                    if name_matches > 0:
                        print(f"\nProperty: {prop['propertyName']}")
                        print(f"Name matches: {name_matches}")

                # Check property type
                if prop.get('typeName'):
                    type_terms = prop['typeName'].lower().split()
                    type_matches = sum(1 for term in query_terms if any(term in type_term for type_term in type_terms))
                    score += type_matches * 1.5  # Give good weight to type matches
                    if type_matches > 0:
                        print(f"Type matches: {type_matches}")

                # Check description
                if prop.get('description'):
                    desc_terms = prop['description'].lower().split()
                    desc_matches = sum(1 for term in query_terms if any(term in desc_term for desc_term in desc_terms))
                    score += desc_matches
                    if desc_matches > 0:
                        print(f"Description matches: {desc_matches}")

                # Check features
                if prop.get('features'):
                    feature_matches = sum(1 for term in query_terms if any(term in feature.lower() for feature in prop['features']))
                    score += feature_matches * 1.2  # Give good weight to feature matches
                    if feature_matches > 0:
                        print(f"Feature matches: {feature_matches}")

                if score > 0:
                    formatted_property = format_property_details(prop)
                    if formatted_property:
                        print(f"Total score: {score}")
                        # Convert score to a normalized confidence value
                        confidence = min(score / (len(query_terms) * 2), 1.0)
                        scored_properties.append({
                            "property": formatted_property,
                            "distance": 1.0 / (score + 1),  # Convert score to distance
                            "semantic_score": confidence,
                            "combined_score": confidence,
                            "match_quality": "perfect" if confidence > 0.7 else "partial",
                            "confidence": confidence
                        })

            # Sort by score (distance) and return top_k
            scored_properties.sort(key=lambda x: x['combined_score'], reverse=True)
            print(f"\nFound {len(scored_properties)} matching properties")
            print("=== END FALLBACK SEARCH DEBUG ===\n")
            return scored_properties[:top_k]

        except Exception as e:
            logger.error(f"Error in fallback search: {str(e)}")
            print(f"\nError in fallback search: {str(e)}")
            return []

    def _get_remaining_properties(self, properties, retrieved_properties, top_k):
        """Get additional properties to fill up to top_k with basic scoring"""
        try:
            remaining = []
            retrieved_ids = {p['property']['PropertyName'] for p in retrieved_properties}

            for prop in properties:
                if len(remaining) >= top_k - len(retrieved_properties):
                    break

                if prop.get('propertyName') not in retrieved_ids:
                    formatted_property = format_property_details(prop)
                    if formatted_property:
                        remaining.append({
                            "property": formatted_property,
                            "distance": 1.0,  # High distance score for additional properties
                            "semantic_score": 0.0,
                            "combined_score": 0.0,
                            "match_quality": "partial",
                            "confidence": 0.0
                        })
                        retrieved_ids.add(prop['propertyName'])

            return remaining

        except Exception as e:
            logger.error(f"Error getting remaining properties: {str(e)}")
            return []

def load_tokenizer_and_model():
    print("Loading tokenizer and LLM model...")
    try:
        # Load tokenizer
        tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_DIR)

        # Load model and move to device
        model_llm = AutoModelForCausalLM.from_pretrained(LLM_MODEL_DIR)
        model_llm = model_llm.to(device)
        model_llm.eval()  # Set to evaluation mode

        print("Tokenizer and LLM model loaded successfully.")
        return tokenizer, model_llm
    except Exception as e:
        logger.error(f"Error loading tokenizer/model: {str(e)}")
        raise
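A minimal sketch of how this module's pieces appear intended to fit together at startup, assuming the saved FAISS index, PCA pickle, and property API are reachable; error handling and the query string are placeholders:

# Hypothetical startup wiring for modules/models.py (assumes index/API availability)
embedder = load_sentence_transformer()
index = load_faiss_index()
pca = load_pca_model()

retriever = CustomRagRetriever(index, embedder, pca=pca)
results = retriever.retrieve("2 BHK near the city centre", top_k=5)
for item in results:
    print(item["property"]["PropertyName"], item["combined_score"])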
modules/nlp_processor.py
ADDED
@@ -0,0 +1,158 @@
import re
import spacy
import numpy as np
from transformers import pipeline
from sentence_transformers import SentenceTransformer
import torch
from typing import Dict, List, Tuple, Union
import logging

class NLPProcessor:
    def __init__(self):
        # Load lightweight models
        self.nlp = spacy.load("en_core_web_sm")
        self.ner_pipeline = pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english")
        self.zero_shot = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
        self.sentence_transformer = SentenceTransformer('all-MiniLM-L6-v2')

        # Initialize numerical extraction patterns
        self.number_patterns = {
            'price': r'\$?\d+(?:,\d{3})*(?:\.\d{2})?',
            'sqft': r'\d+(?:,\d{3})*\s*(?:sq\s*ft|square\s*feet)',
            'year': r'(?:19|20)\d{2}',
            'beds': r'\d+\s*(?:bed|beds|bedroom|bedrooms)',
            'baths': r'\d+\s*(?:bath|baths|bathroom|bathrooms)'
        }

        # Property status categories
        self.status_categories = [
            "available", "sold", "pending", "under contract",
            "off market", "coming soon", "active", "inactive"
        ]

        # Currency conversion rates (example)
        self.currency_rates = {
            'lakh': 100000,
            'crore': 10000000,
            'million': 1000000,
            'billion': 1000000000
        }

    def convert_currency(self, value: str) -> float:
        """Convert various currency formats to a standard number"""
        try:
            # Check for word-based multipliers before stripping letters,
            # otherwise words like "lakh" or "crore" can never match
            value_lower = value.lower()
            for word, multiplier in self.currency_rates.items():
                if word in value_lower:
                    # Extract the number and multiply by the rate
                    num = float(re.sub(r'[^\d.]', '', value))
                    return num * multiplier

            # No special words found: strip currency symbols and commas, return the number as is
            return float(re.sub(r'[^\d.]', '', value))
        except (ValueError, TypeError):
            return 0.0

    def extract_numerical_values(self, text: str) -> Dict[str, Union[float, int]]:
        """Extract numerical values from text using regex and NLP"""
        values = {}

        # Extract numbers using patterns
        for key, pattern in self.number_patterns.items():
            matches = re.finditer(pattern, text.lower())
            for match in matches:
                value = match.group()
                # Clean and convert value
                if key == 'price':
                    values[key] = self.convert_currency(value)
                elif key in ['sqft', 'beds', 'baths']:
                    values[key] = int(re.sub(r'[^\d]', '', value))
                elif key == 'year':
                    values[key] = int(value)

        return values

    def classify_property_status(self, text: str) -> str:
        """Classify property status using zero-shot classification"""
        result = self.zero_shot(
            text,
            candidate_labels=self.status_categories,
            multi_label=False
        )
        return result['labels'][0]

    def extract_landmarks(self, text: str) -> List[str]:
        """Extract landmarks and points of interest using NER"""
        doc = self.nlp(text)
        landmarks = []

        # Extract named entities
        for ent in doc.ents:
            if ent.label_ in ['FAC', 'ORG', 'LOC']:
                landmarks.append(ent.text)

        return landmarks

    def semantic_similarity(self, query: str, candidates: List[str]) -> List[Tuple[str, float]]:
        """Calculate semantic similarity between query and candidates"""
        query_embedding = self.sentence_transformer.encode(query)
        candidate_embeddings = self.sentence_transformer.encode(candidates)

        similarities = []
        for candidate, embedding in zip(candidates, candidate_embeddings):
            similarity = np.dot(query_embedding, embedding) / (
                np.linalg.norm(query_embedding) * np.linalg.norm(embedding)
            )
            similarities.append((candidate, float(similarity)))

        return sorted(similarities, key=lambda x: x[1], reverse=True)

    def process_query(self, query: str) -> Dict:
        """Process a natural language query and extract structured information"""
        # Extract numerical values
        numerical_values = self.extract_numerical_values(query)

        # Extract landmarks
        landmarks = self.extract_landmarks(query)

        # Classify property status if mentioned
        status = None
        if any(status_word in query.lower() for status_word in self.status_categories):
|
125 |
+
status = self.classify_property_status(query)
|
126 |
+
|
127 |
+
return {
|
128 |
+
'numerical_values': numerical_values,
|
129 |
+
'landmarks': landmarks,
|
130 |
+
'status': status,
|
131 |
+
'original_query': query
|
132 |
+
}
|
133 |
+
|
134 |
+
def format_property_details(self, property_data: Dict) -> str:
|
135 |
+
"""Format property details in a natural language format"""
|
136 |
+
details = []
|
137 |
+
|
138 |
+
# Basic information
|
139 |
+
if 'PropertyName' in property_data:
|
140 |
+
details.append(f"Property: {property_data['PropertyName']}")
|
141 |
+
if 'Address' in property_data:
|
142 |
+
details.append(f"Location: {property_data['Address']}")
|
143 |
+
|
144 |
+
# Numerical details
|
145 |
+
if 'Beds' in property_data:
|
146 |
+
details.append(f"{property_data['Beds']} bedrooms")
|
147 |
+
if 'Baths' in property_data:
|
148 |
+
details.append(f"{property_data['Baths']} bathrooms")
|
149 |
+
if 'LeasableSquareFeet' in property_data:
|
150 |
+
details.append(f"{property_data['LeasableSquareFeet']} square feet")
|
151 |
+
|
152 |
+
# Status and price
|
153 |
+
if 'PropertyStatus' in property_data:
|
154 |
+
details.append(f"Status: {property_data['PropertyStatus']}")
|
155 |
+
if 'MarketValue' in property_data:
|
156 |
+
details.append(f"Price: ${property_data['MarketValue']:,.2f}")
|
157 |
+
|
158 |
+
return "\n".join(details)
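A minimal usage sketch of NLPProcessor (hypothetical query text, not part of the upload; the dictionary keys are the ones process_query returns above):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.nlp_processor import NLPProcessor

nlp = NLPProcessor()  # downloads the spaCy and transformer models on first use
result = nlp.process_query("2 bed apartment under $2,000 near Hitech City, available now")
print(result['numerical_values'])   # e.g. {'price': 2000.0, 'beds': 2}
print(result['landmarks'], result['status'])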
|
modules/parallel.py
ADDED
@@ -0,0 +1,126 @@
|
import multiprocessing
import threading
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import torch
import logging
from functools import partial
import queue
import time

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global thread pool for I/O bound tasks
thread_pool = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2)

# Global process pool for CPU bound tasks
process_pool = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count())

# Queue for managing async tasks
task_queue = queue.Queue()

def get_device():
    """Get the appropriate device for computation"""
    return "cuda" if torch.cuda.is_available() else "cpu"

def parallel_map(func, items, use_processes=False):
    """Execute a function in parallel on a list of items"""
    executor = process_pool if use_processes else thread_pool
    return list(executor.map(func, items))

def batch_process(items, batch_size=32, func=None):
    """Process items in batches"""
    results = []
    for i in range(0, len(items), batch_size):
        batch = items[i:i + batch_size]
        if func:
            batch_results = parallel_map(func, batch)
        else:
            batch_results = batch
        results.extend(batch_results)
    return results

class AsyncTaskManager:
    def __init__(self, max_workers=None):
        self.max_workers = max_workers or multiprocessing.cpu_count()
        self.thread_pool = ThreadPoolExecutor(max_workers=self.max_workers)
        self.tasks = {}
        self.results = {}
        self.lock = threading.Lock()

    def submit_task(self, task_id, func, *args, **kwargs):
        """Submit a task to be executed asynchronously"""
        future = self.thread_pool.submit(func, *args, **kwargs)
        with self.lock:
            self.tasks[task_id] = future
        return task_id

    def get_result(self, task_id, timeout=None):
        """Get the result of a task"""
        if task_id not in self.tasks:
            return None

        if task_id in self.results:
            return self.results[task_id]

        try:
            result = self.tasks[task_id].result(timeout=timeout)
            with self.lock:
                self.results[task_id] = result
            return result
        except Exception as e:
            logger.error(f"Error getting result for task {task_id}: {str(e)}")
            return None

    def cancel_task(self, task_id):
        """Cancel a running task"""
        if task_id in self.tasks:
            self.tasks[task_id].cancel()
            with self.lock:
                del self.tasks[task_id]

    def cleanup(self):
        """Clean up completed tasks"""
        with self.lock:
            completed_tasks = [task_id for task_id, future in self.tasks.items()
                               if future.done()]
            for task_id in completed_tasks:
                if task_id not in self.results:
                    try:
                        self.results[task_id] = self.tasks[task_id].result()
                    except Exception:
                        pass
                del self.tasks[task_id]

class ModelParallelizer:
    def __init__(self, model, batch_size=32):
        self.model = model
        self.batch_size = batch_size
        self.device = get_device()
        self.model = self.model.to(self.device)
        self.model.eval()

    def parallel_predict(self, inputs):
        """Run predictions in parallel using batching"""
        results = []
        with torch.no_grad():
            for i in range(0, len(inputs), self.batch_size):
                batch = inputs[i:i + self.batch_size]
                batch = torch.stack(batch).to(self.device)
                batch_results = self.model(batch)
                results.extend(batch_results.cpu().numpy())
        return results

    def parallel_encode(self, texts):
        """Encode texts in parallel using batching"""
        return batch_process(texts, self.batch_size, self.model.encode)

# Global task manager instance
task_manager = AsyncTaskManager()

def cleanup_resources():
    """Clean up all parallel processing resources"""
    thread_pool.shutdown(wait=True)
    process_pool.shutdown(wait=True)
    task_manager.cleanup()
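A minimal sketch of the two main entry points in this module (hypothetical workload, not part of the upload):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.parallel import parallel_map, task_manager, cleanup_resources

squares = parallel_map(lambda x: x * x, range(8))      # thread-pool map
task_manager.submit_task("warmup", sum, range(1000))   # async task tracked by id
print(squares, task_manager.get_result("warmup", timeout=5))
cleanup_resources()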
|
modules/property_processor.py
ADDED
@@ -0,0 +1,220 @@
|
from typing import Dict, List, Optional, Union, Tuple
import pandas as pd
import numpy as np
from datetime import datetime
import logging
from sentence_transformers import SentenceTransformer

class PropertyProcessor:
    def __init__(self):
        self.sentence_transformer = SentenceTransformer('all-MiniLM-L6-v2')

    def format_property_details(self, property_data: Dict) -> str:
        """Format property details into a natural language string"""
        print(f"\n=== Formatting property details for: {property_data.get('propertyName', 'Unknown')} ===")
        details = []

        # Basic information
        if property_data.get('Address'):
            details.append(f"Located at {property_data['Address']}")

        # Handle PG properties specifically
        if property_data.get('typeName', '').lower() == 'pg':
            print("Processing PG property")
            pg_details = property_data.get('pgPropertyDetails', {})
            if pg_details:
                details.append("PG Accommodation")
                if pg_details.get('totalBeds'):
                    details.append(f"Total Beds: {pg_details['totalBeds']}")
                if pg_details.get('availableFor'):
                    details.append(f"Available for: {pg_details['availableFor']}")
                if pg_details.get('foodIncluded'):
                    details.append(f"Food: {pg_details['foodIncluded']}")
                if pg_details.get('wifiAvailable'):
                    details.append(f"WiFi: {'Available' if pg_details['wifiAvailable'] else 'Not Available'}")
        else:
            # Regular property details
            if property_data.get('BHK'):
                details.append(f"{property_data['BHK']} BHK")

            if property_data.get('Bathrooms'):
                details.append(f"with {property_data['Bathrooms']} bathrooms")

            if property_data.get('Square_Footage'):
                details.append(f"covering {property_data['Square_Footage']} sq ft")

            if property_data.get('Year_Built'):
                details.append(f"built in {property_data['Year_Built']}")

            if property_data.get('Market_Value'):
                details.append(f"priced at ${property_data['Market_Value']:,.2f}")

        # Enhanced status display
        if property_data.get('Status'):
            status = property_data['Status'].lower()
            status_display = {
                'available': 'Available for purchase/rent',
                'sold': 'Sold',
                'pending': 'Sale/Rental pending',
                'under contract': 'Under contract',
                'off market': 'Currently off market',
                'coming soon': 'Coming soon to market',
                'active': 'Active listing',
                'inactive': 'Inactive listing'
            }.get(status, f"Status: {property_data['Status']}")
            details.append(status_display)

        if property_data.get('Distance'):
            details.append(f"Distance: {property_data['Distance']} miles")

        # Add landmark information if available
        if property_data.get('Nearby_Landmarks'):
            landmarks = property_data['Nearby_Landmarks']
            if isinstance(landmarks, list):
                details.append(f"Nearby landmarks: {', '.join(landmarks)}")
            elif isinstance(landmarks, str):
                details.append(f"Nearby landmarks: {landmarks}")

        formatted_details = " | ".join(details)
        print(f"Formatted details: {formatted_details}")
        return formatted_details

    def filter_by_numerical_range(self,
                                  properties: List[Dict],
                                  field: str,
                                  min_value: Optional[float] = None,
                                  max_value: Optional[float] = None) -> List[Dict]:
        """Filter properties based on numerical range"""
        filtered_properties = []

        for property_data in properties:
            try:
                value = float(property_data.get(field, 0))

                if min_value is not None and value < min_value:
                    continue

                if max_value is not None and value > max_value:
                    continue

                filtered_properties.append(property_data)

            except (ValueError, TypeError) as e:
                logging.error(f"Error filtering {field}: {str(e)}")
                continue

        return filtered_properties

    def filter_by_status(self,
                         properties: List[Dict],
                         status: str) -> List[Dict]:
        """Filter properties by status"""
        return [p for p in properties
                if p.get('Status', '').lower() == status.lower()]

    def filter_by_bhk(self,
                      properties: List[Dict],
                      bhk: Union[int, str]) -> List[Dict]:
        """Filter properties by BHK count"""
        try:
            bhk_value = int(bhk) if isinstance(bhk, str) else bhk
            return [p for p in properties
                    if int(p.get('BHK', 0)) == bhk_value]
        except (ValueError, TypeError):
            return []

    def filter_by_bathrooms(self,
                            properties: List[Dict],
                            bathroom_count: Union[int, str]) -> List[Dict]:
        """Filter properties by bathroom count"""
        try:
            bath_value = int(bathroom_count) if isinstance(bathroom_count, str) else bathroom_count
            return [p for p in properties
                    if int(p.get('Bathrooms', 0)) == bath_value]
        except (ValueError, TypeError):
            return []

    def filter_by_year_built(self,
                             properties: List[Dict],
                             min_year: Optional[int] = None,
                             max_year: Optional[int] = None) -> List[Dict]:
        """Filter properties by year built"""
        return self.filter_by_numerical_range(
            properties, 'Year_Built', min_year, max_year
        )

    def filter_by_square_footage(self,
                                 properties: List[Dict],
                                 min_sqft: Optional[float] = None,
                                 max_sqft: Optional[float] = None) -> List[Dict]:
        """Filter properties by square footage"""
        return self.filter_by_numerical_range(
            properties, 'Square_Footage', min_sqft, max_sqft
        )

    def filter_by_market_value(self,
                               properties: List[Dict],
                               min_value: Optional[float] = None,
                               max_value: Optional[float] = None) -> List[Dict]:
        """Filter properties by market value"""
        return self.filter_by_numerical_range(
            properties, 'Market_Value', min_value, max_value
        )

    def get_property_embedding(self, property_data: Dict) -> np.ndarray:
        """Get embedding for property description"""
        description = self.format_property_details(property_data)
        return self.sentence_transformer.encode(description)

    def find_similar_properties(self,
                                reference_property: Dict,
                                candidate_properties: List[Dict],
                                top_k: int = 5) -> List[Tuple[Dict, float]]:
        """Find properties similar to reference property"""
        ref_embedding = self.get_property_embedding(reference_property)

        similarities = []
        for property_data in candidate_properties:
            try:
                prop_embedding = self.get_property_embedding(property_data)
                similarity = np.dot(ref_embedding, prop_embedding) / (
                    np.linalg.norm(ref_embedding) * np.linalg.norm(prop_embedding)
                )
                similarities.append((property_data, float(similarity)))
            except Exception as e:
                logging.error(f"Error calculating similarity: {str(e)}")
                continue

        return sorted(similarities, key=lambda x: x[1], reverse=True)[:top_k]

    def format_zip_code(self, zip_code: Union[str, float, int]) -> str:
        """Format zip code as string"""
        try:
            return str(int(float(zip_code)))
        except (ValueError, TypeError):
            return str(zip_code)

    def process_property_data(self, property_data: Dict) -> Dict:
        """Process and clean property data"""
        processed_data = property_data.copy()

        # Format zip code
        if 'Zip_Code' in processed_data:
            processed_data['Zip_Code'] = self.format_zip_code(
                processed_data['Zip_Code']
            )

        # Convert numerical fields
        numerical_fields = [
            'Square_Footage', 'Market_Value', 'Year_Built',
            'BHK', 'Bathrooms', 'Latitude', 'Longitude'
        ]

        for field in numerical_fields:
            if field in processed_data:
                try:
                    processed_data[field] = float(processed_data[field])
                except (ValueError, TypeError):
                    processed_data[field] = 0.0

        return processed_data
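A minimal sketch of chaining the filters above (hypothetical property dicts using the same field names, not part of the upload):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.property_processor import PropertyProcessor

processor = PropertyProcessor()
properties = [
    {"Address": "12 Example Rd", "BHK": 2, "Bathrooms": 2,
     "Square_Footage": 950, "Market_Value": 85000, "Status": "available"},
    {"Address": "34 Sample St", "BHK": 3, "Bathrooms": 2,
     "Square_Footage": 1400, "Market_Value": 120000, "Status": "sold"},
]
available = processor.filter_by_status(properties, "available")
two_bhk = processor.filter_by_bhk(available, 2)
print(len(two_bhk))  # 1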
|
modules/rag/feature_matcher.py
ADDED
@@ -0,0 +1,243 @@
|
import torch
import numpy as np
from sentence_transformers import SentenceTransformer, util
from transformers import pipeline
import pickle
from pathlib import Path
from typing import Dict, List, Any, Tuple
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class DynamicFeatureMatcher:
    def __init__(self, load_saved=False):
        self.model_path = Path("models/saved_models/feature_matcher")
        self.model_path.mkdir(parents=True, exist_ok=True)

        if load_saved:
            self._load_models()
        else:
            self._initialize_models()

    def _load_models(self):
        """Load all saved models and patterns"""
        # Determine the device here as well (the original referenced an undefined `device`)
        device = "cuda" if torch.cuda.is_available() else "cpu"

        # Load base model
        self.models = {
            'base': SentenceTransformer(str(self.model_path / "base_model")).to(device),
            'semantic': SentenceTransformer(str(self.model_path / "semantic_model")).to(device)
        }

        # Load zero-shot model
        with open(self.model_path / "zero_shot_model", 'rb') as f:
            self.models['zero_shot'] = pickle.load(f)

        # Load feature patterns
        with open(self.model_path / "feature_patterns.pkl", 'rb') as f:
            self.feature_patterns = pickle.load(f)

        # Initialize embedding cache
        self.embedding_cache = {}

    def _initialize_models(self):
        """Initialize new models"""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.models = {
            'base': SentenceTransformer('all-MiniLM-L6-v2').to(device),
            'zero_shot': pipeline("zero-shot-classification",
                                  model="facebook/bart-large-mnli",
                                  device=0 if torch.cuda.is_available() else -1),
            'semantic': SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2').to(device)
        }

        # Initialize feature patterns
        self.feature_patterns = self._initialize_feature_patterns()

        # Initialize embedding cache
        self.embedding_cache = {}

    def _initialize_feature_patterns(self) -> Dict[str, Dict[str, Any]]:
        """Initialize feature patterns for different property types"""
        return {
            'wifi': {
                'semantic_patterns': [
                    "wifi available",
                    "internet access",
                    "wireless internet",
                    "high-speed internet"
                ],
                'context_patterns': [
                    "wifi",
                    "internet",
                    "wireless",
                    "wi-fi"
                ],
                'pg': 'wifiAvailable',
                'commercial': 'wifiAvailable'
            },
            'ac': {
                'semantic_patterns': [
                    "air conditioning",
                    "central air",
                    "climate control",
                    "cooling system"
                ],
                'context_patterns': [
                    "ac",
                    "air conditioning",
                    "central air",
                    "cooling"
                ],
                'pg': 'isACAvailable',
                'commercial': 'isACAvailable'
            },
            'parking': {
                'semantic_patterns': [
                    "parking available",
                    "car parking",
                    "garage",
                    "parking space"
                ],
                'context_patterns': [
                    "parking",
                    "garage",
                    "car space",
                    "vehicle parking"
                ],
                'pg': 'isParkingAvailable',
                'commercial': 'hasParking'
            },
            'power_backup': {
                'semantic_patterns': [
                    "power backup",
                    "generator",
                    "backup power",
                    "uninterrupted power"
                ],
                'context_patterns': [
                    "power backup",
                    "generator",
                    "ups",
                    "inverter"
                ],
                'pg': 'powerBackup',
                'commercial': 'powerBackup'
            }
        }

    def _get_embedding(self, text: str) -> torch.Tensor:
        """Get embedding for text with caching"""
        if text in self.embedding_cache:
            return self.embedding_cache[text]

        embedding = self.models['base'].encode(text, convert_to_tensor=True)
        self.embedding_cache[text] = embedding
        return embedding

    def _analyze_negation(self, query: str) -> Tuple[bool, float]:
        """Analyze if query contains negation"""
        negation_patterns = [
            "no", "not", "without", "lack of", "missing",
            "doesn't have", "don't have", "doesn't need",
            "don't need", "isn't", "aren't"
        ]

        query_lower = query.lower()
        has_negation = any(pattern in query_lower for pattern in negation_patterns)

        # Calculate confidence based on negation word position
        confidence = 0.0
        if has_negation:
            words = query_lower.split()
            for i, word in enumerate(words):
                if word in negation_patterns:
                    # Higher confidence if negation is closer to feature words
                    confidence = max(confidence, 1.0 - (i / len(words)))

        return has_negation, confidence

    def _analyze_feature_presence(self, query: str, feature: str, patterns: Dict[str, Any]) -> Tuple[bool, float]:
        """Analyze if a feature is mentioned in the query using multiple methods"""
        query_lower = query.lower()

        # 1. Zero-shot classification
        zero_shot_result = self.models['zero_shot'](
            query,
            [f"This property has {feature}", f"This property does not have {feature}"]
        )

        # 2. Semantic similarity
        query_embedding = self._get_embedding(query)
        pattern_embeddings = [self._get_embedding(p) for p in patterns['semantic_patterns']]
        similarities = [util.pytorch_cos_sim(query_embedding, p).item() for p in pattern_embeddings]
        max_similarity = max(similarities) if similarities else 0

        # 3. Context pattern matching
        context_match = any(pattern in query_lower for pattern in patterns['context_patterns'])

        # Combine results with weights
        presence_score = (
            0.4 * zero_shot_result['scores'][0] +
            0.4 * max_similarity +
            0.2 * (1.0 if context_match else 0.0)
        )

        return presence_score > 0.5, presence_score

    def analyze_query(self, query: str) -> Dict[str, bool]:
        """Analyze query to understand feature requirements"""
        # Analyze negation
        has_negation, negation_confidence = self._analyze_negation(query)

        # Analyze each feature
        feature_requirements = {}
        for feature, patterns in self.feature_patterns.items():
            is_present, confidence = self._analyze_feature_presence(query, feature, patterns)
            if is_present:
                feature_requirements[feature] = not has_negation

        return feature_requirements

    def check_property_features(self, property_data: Dict[str, Any], feature_requirements: Dict[str, bool]) -> bool:
        """Check if property meets the feature requirements"""
        property_type = property_data.get('typeName', '').lower()

        for feature, required in feature_requirements.items():
            patterns = self.feature_patterns[feature]

            if 'pg' in property_type or 'hostel' in property_type:
                pg_details = property_data.get('pgPropertyDetails', {})
                if pg_details and patterns['pg']:
                    if pg_details.get(patterns['pg'], False) != required:
                        return False
            elif any(t in property_type for t in ['office', 'shop', 'commercial']):
                commercial_details = property_data.get('commercialPropertyDetails', {})
                if commercial_details and patterns['commercial']:
                    if commercial_details.get(patterns['commercial'], False) != required:
                        return False

        return True

    def save_models(self):
        """Save all models and patterns"""
        try:
            # Save base model
            self.models['base'].save(str(self.model_path / "base_model"))

            # Save semantic model
            self.models['semantic'].save(str(self.model_path / "semantic_model"))

            # Save zero-shot model
            with open(self.model_path / "zero_shot_model", 'wb') as f:
                pickle.dump(self.models['zero_shot'], f)

            # Save feature patterns
            with open(self.model_path / "feature_patterns.pkl", 'wb') as f:
                pickle.dump(self.feature_patterns, f)

            logger.info("Successfully saved all models and patterns")

        except Exception as e:
            logger.error(f"Error saving models: {str(e)}")
            raise
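A minimal sketch of the intended flow (hypothetical query and property record using the field names from the patterns above, not part of the upload):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.rag.feature_matcher import DynamicFeatureMatcher

matcher = DynamicFeatureMatcher(load_saved=False)  # downloads models on first run
requirements = matcher.analyze_query("Looking for a PG with wifi and parking")
pg = {"typeName": "PG",
      "pgPropertyDetails": {"wifiAvailable": True, "isParkingAvailable": False}}
print(requirements, matcher.check_property_features(pg, requirements))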
|
modules/response.py
ADDED
@@ -0,0 +1,249 @@
|
import time
import logging
from modules.config import UserPlan, PLAN_FIELDS
from modules.parallel import ModelParallelizer, parallel_map, batch_process
import re
import torch
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from typing import Dict

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global thread pool for text generation
text_generation_pool = ThreadPoolExecutor(max_workers=4)

def generate_response(query, tokenizer, model_llm, max_new_tokens=256, temperature=0.7, top_k=30, top_p=0.8, repetition_penalty=1.05):
    print("\n" + "="*50)
    print("GENERATE RESPONSE DEBUG")
    print(f"Input Query: {query}")
    print("="*50 + "\n")

    print("Generation Parameters:")
    print(f"- Max New Tokens: {max_new_tokens}")
    print(f"- Temperature: {temperature}")
    print(f"- Top-K Sampling: {top_k}")
    print(f"- Top-P Sampling: {top_p}")
    print(f"- Repetition Penalty: {repetition_penalty}")
    print(f"- Sampling Enabled: True (do_sample=True)\n")

    # Format the input text without the assistant prefix
    input_text = f"""User: {query}
Assistant: I am a concise real estate chatbot. I'll provide a clear, direct answer about:
"""
    print(f"Formatted Input Text: {input_text}\n")
    inputs = tokenizer(input_text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")

    start_time = time.time()

    try:
        print("Generating response...")

        # Use parallel processing for generation
        def generate_batch(batch_inputs):
            with torch.no_grad():
                outputs = model_llm.generate(
                    batch_inputs.input_ids,
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p,
                    repetition_penalty=repetition_penalty,
                    do_sample=True,
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id
                )
            return tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Submit generation task to thread pool
        future = text_generation_pool.submit(generate_batch, inputs)
        response = future.result()

        # Clean up the response
        response = response.replace(input_text, "").strip()

        # Remove any remaining prefixes or instructions
        cleanup_patterns = [
            "USER QUERY:",
            "PROPERTIES:",
            "CHATBOT INSTRUCTIONS:",
            "Assistant:",
            "I am a concise real estate chatbot.",
            "I'll provide a clear, direct answer about:"
        ]

        for pattern in cleanup_patterns:
            if pattern in response:
                response = response.split(pattern)[-1].strip()

        # Remove any empty lines and normalize spacing
        response = "\n".join(line.strip() for line in response.split("\n") if line.strip())

        end_time = time.time()
        duration = end_time - start_time

        print("\nGeneration Results:")
        print(f"Raw Response: {response}")
        print(f"Duration: {duration:.2f} seconds\n")
        print("="*50 + "\n")

        return response, duration

    except Exception as e:
        logger.error(f"\nERROR in generate_response: {str(e)}")
        logging.error(f"Error generating response: {e}")
        return "An error occurred while generating the response.", None

def format_field_name(field_name):
    """Convert camelCase or PascalCase field names to space-separated words"""
    formatted = re.sub(r'([A-Z])', r' \1', field_name).strip()
    formatted = ' '.join(word.capitalize() for word in formatted.split())
    return formatted

def format_llm_prompt(query, filtered_results, user_plan, original_query):
    """Format the prompt for LLM with all property details"""
    try:
        response_text = (
            f"USER QUERY: {original_query}\n\n"
            f"PROPERTIES:\n"
        )

        # Parallel processing of property formatting
        def format_property(property_data):
            property_info = property_data['property']
            formatted_text = ""

            # Include all property information
            for key, value in property_info.items():
                if key not in ["propertyImages", "property_image", "image_url"]:
                    formatted_key = format_field_name(key)
                    if key in ["ZipCode", "LeasableSquareFeet", "YearBuilt", "NumberOfRooms",
                               "ParkingSpaces", "ViewNumber", "Contact", "TotalSquareFeet",
                               "Beds", "Baths"] and isinstance(value, (int, float)):
                        value = int(value)
                    formatted_text += f"{formatted_key}: {value}\n"

            return formatted_text

        # Process properties in parallel
        property_texts = parallel_map(format_property, filtered_results)
        for i, text in enumerate(property_texts, 1):
            response_text += f"\n{i}. {text}"

        response_text += (
            "\nCHATBOT INSTRUCTIONS:\n"
            "1. You are a REAL ESTATE CHATBOT. Be direct and conversational.\n"
            "2. Keep responses CONCISE.\n"
            "3. Focus ONLY on answering the user's specific question.\n"
            "4. Use simple formatting: property names in **bold**, separate properties with bullet points.\n"
            "5. Avoid phrases like 'I found' or 'Based on the information' - just give the facts.\n"
            "6. Speak in a friendly, helpful tone as if texting a client.\n"
            "7. Start with a friendly greeting or opening line like 'Here's what I found for you!' or 'Great question!'\n"
            "8. End with a friendly follow-up question like 'Would you like more details?' or 'Is there a specific property you're interested in?'\n"
        )

        return response_text, False

    except Exception as e:
        logging.error(f"Error in format_llm_prompt: {str(e)}")
        return f"USER QUERY: {original_query}\n\nPROPERTIES:\n\nI apologize, but I encountered an error processing your request. Please try again.", False

def convert_numeric_fields_to_int(property_dict):
    """Convert numeric fields from float to int for better display"""
    int_fields = [
        "ZipCode", "LeasableSquareFeet", "YearBuilt", "NumberOfRooms",
        "ParkingSpaces", "ViewNumber", "Contact", "TotalSquareFeet",
        "Beds", "Baths"
    ]

    # Parallel processing of numeric field conversion
    def convert_field(field):
        if field in property_dict and isinstance(property_dict[field], (int, float)):
            try:
                return field, int(property_dict[field])
            except (ValueError, TypeError):
                return field, property_dict[field]
        return field, property_dict.get(field)

    converted_fields = parallel_map(convert_field, int_fields)
    for field, value in converted_fields:
        property_dict[field] = value

    return property_dict

def filter_property_by_plan(property_dict, plan):
    """Return all property data without filtering"""
    try:
        # Return all property data
        filtered_property = {
            **property_dict,
            'propertyImages': property_dict.get('property_image', []),
        }

        return filtered_property

    except Exception as e:
        logging.error(f"Error in filter_property_by_plan: {str(e)}")
        raise

def format_response(self, response: Dict) -> Dict:
    """Format the response for frontend display"""
    print("\n=== Formatting response for frontend ===")
    try:
        # Extract only the response text, removing any prompt or debug information
        response_text = response.get("response", "")

        # Clean up the response text by removing unwanted prefixes
        cleanup_patterns = [
            "USER QUERY:",
            "PROPERTIES:",
            "CHATBOT INSTRUCTIONS:",
            "Assistant:",
            "I am a concise real estate chatbot.",
            "I'll provide a clear, direct answer about:"
        ]

        # Remove each pattern if it exists
        for pattern in cleanup_patterns:
            if pattern in response_text:
                response_text = response_text.split(pattern)[-1].strip()

        # Remove any remaining debug information
        if "DEBUG" in response_text:
            response_text = response_text.split("DEBUG")[0].strip()

        # Remove any empty lines and normalize spacing
        response_text = "\n".join(line.strip() for line in response_text.split("\n") if line.strip())

        # Format the response
        formatted = {
            "response": response_text,
            "properties": response.get("properties", []),
            "status": "success"
        }
        print(f"Formatted response: {formatted}")
        return formatted
    except Exception as e:
        print(f"Error formatting response: {str(e)}")
        return {
            "response": "I apologize, but I encountered an error processing your request.",
            "properties": [],
            "status": "error"
        }

def send_response(self, response: Dict) -> Dict:
    """Send response to frontend"""
    print("\n=== Sending response to frontend ===")
    try:
        formatted_response = self.format_response(response)
        print(f"Sending response: {formatted_response}")
        return formatted_response
    except Exception as e:
        print(f"Error sending response: {str(e)}")
        return {
            "response": "I apologize, but I encountered an error processing your request.",
            "properties": [],
            "status": "error"
        }
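A minimal sketch of the prompt-formatting path, which does not require the LLM to be loaded (hypothetical retrieved results, not part of the upload; user_plan is accepted but unused by format_llm_prompt above):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.response import format_field_name, format_llm_prompt

results = [{"property": {"PropertyName": "Green Villa", "Beds": 3, "Baths": 2.0}}]
prompt, _ = format_llm_prompt("3 bhk", results, None, "Show me 3 BHK villas")
print(format_field_name("LeasableSquareFeet"))  # Leasable Square Feet
print(prompt.splitlines()[0])                   # USER QUERY: Show me 3 BHK villas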
|
modules/security.py
ADDED
@@ -0,0 +1,192 @@
|
import time
import logging
from collections import defaultdict
from better_profanity import profanity
from modules.config import (
    RATE_LIMIT_WINDOW,
    MAX_REQUESTS_PER_WINDOW,
    CACHE_TTL,
    MAX_QUERY_LENGTH,
    UserPlan,
    PLAN_FIELDS
)
import torch
import numpy as np
from sentence_transformers import util
import re
import bleach
import threading
from functools import wraps
from transformers import pipeline

# Thread local storage for user plan
_thread_local = threading.local()

def get_current_plan():
    """Get the current user plan from thread local storage"""
    return getattr(_thread_local, 'current_plan', UserPlan.PLUS)

def set_current_plan(plan):
    """Set the current user plan in thread local storage"""
    _thread_local.current_plan = plan

def with_user_plan(f):
    """Decorator to handle user plan from request"""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        try:
            from flask import request
            plan = UserPlan.BASIC  # Default to BASIC plan

            if request.is_json:
                plan_str = request.json.get('user_plan', 'basic').lower()
                try:
                    plan = UserPlan(plan_str)
                except ValueError:
                    logging.warning(f"Invalid plan value: {plan_str}, defaulting to BASIC")
                    plan = UserPlan.BASIC

            set_current_plan(plan)
            return f(*args, **kwargs)
        except Exception as e:
            logging.error(f"Error in with_user_plan decorator: {str(e)}")
            set_current_plan(UserPlan.BASIC)  # Ensure BASIC plan is set even on error
            return f(*args, **kwargs)
    return decorated_function

class SecurityManager:
    def __init__(self):
        self.request_counts = defaultdict(lambda: {'count': 0, 'window_start': 0})

    def check_rate_limit(self, ip_address):
        current_time = time.time()
        if current_time - self.request_counts[ip_address]['window_start'] >= RATE_LIMIT_WINDOW:
            self.request_counts[ip_address] = {'count': 0, 'window_start': current_time}

        self.request_counts[ip_address]['count'] += 1
        return self.request_counts[ip_address]['count'] <= MAX_REQUESTS_PER_WINDOW

class QueryValidator:
    def __init__(self, model_embedding):
        self.model_embedding = model_embedding
        self.domain_classifier = pipeline(
            "zero-shot-classification",
            model="facebook/bart-large-mnli",
            device=0 if torch.cuda.is_available() else -1
        )

        # Real estate related categories
        self.real_estate_categories = [
            "property search",
            "rental property",
            "property for sale",
            "PG accommodation",
            "hostel accommodation",
            "commercial property",
            "property details",
            "property location",
            "property price",
            "property features"
        ]

        # Initialize with examples
        self.initialize_with_examples()

    def initialize_with_examples(self):
        """Initialize with example queries for better classification"""
        self.real_estate_examples = [
            "Show me 2BHK apartments in Hyderabad",
            "Find PG accommodation near Hitech City",
            "What are the properties for sale in Madhapur?",
            "Looking for a 3BHK villa in Gachibowli",
            "Need a girls hostel in Kondapur",
            "Show me commercial properties for rent",
            "Find properties near my location",
            "What's the price of 2BHK in Gachibowli?",
            "Show me properties with swimming pool",
            "Find PG with food facility",
            "Looking for boys hostel in Madhapur",
            "Show me properties near metro station",
            "Find properties with 24/7 security",
            "Need a furnished apartment",
            "Show me properties with parking"
        ]

        self.non_real_estate_examples = [
            "What's the weather like today?",
            "Tell me a joke",
            "What's the time?",
            "How to make pasta?",
            "What's the capital of France?",
            "Show me the latest news",
            "Play some music",
            "What's the meaning of life?",
            "How to fix my computer?",
            "Tell me about history"
        ]

    def is_real_estate_query(self, query):
        """Check if the query is related to real estate using zero-shot classification"""
        try:
            # Handle simple responses that are part of a conversation
            simple_responses = ["yes", "no", "ok", "sure", "fine", "alright"]
            if query.lower().strip() in simple_responses:
                # If it's a simple response, check if we're in a real estate context
                # This could be enhanced by checking conversation history
                return True

            # First check for common real estate keywords
            real_estate_keywords = [
                "property", "house", "apartment", "flat", "villa", "pg", "hostel",
                "rent", "sale", "buy", "accommodation", "room", "beds", "baths",
                "bhk", "square feet", "location", "price", "amenities", "facilities"
            ]

            query_lower = query.lower()
            if any(keyword in query_lower for keyword in real_estate_keywords):
                return True

            # Use zero-shot classification for more complex cases
            result = self.domain_classifier(
                query,
                candidate_labels=["real estate query", "non real estate query"],
                hypothesis_template="This is a {}."
            )

            # Get the confidence score for real estate
            real_estate_score = result['scores'][0] if result['labels'][0] == "real estate query" else result['scores'][1]

            # Also check against our example categories
            category_result = self.domain_classifier(
                query,
                candidate_labels=self.real_estate_categories,
                hypothesis_template="This query is about {}."
            )

            # If any category has high confidence, consider it a real estate query
            max_category_score = max(category_result['scores'])

            # Consider it a real estate query if either the general classification
            # or any specific category has high confidence
            return real_estate_score > 0.6 or max_category_score > 0.7

        except Exception as e:
            logging.error(f"Error in is_real_estate_query: {str(e)}")
            # Default to True if there's an error to be safe
            return True

    def clean_input(self, query):
        """Clean and validate the input query"""
        # Remove any special characters and extra spaces
        cleaned = re.sub(r'[^\w\s]', ' ', query)
        cleaned = ' '.join(cleaned.split())
        return cleaned

    def validate_query_length(self, query):
        """Validate query length"""
        return len(query) <= MAX_QUERY_LENGTH

    def check_profanity(self, query):
        """Check for profanity in the query"""
        # Add profanity checking logic here if needed
        return True
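A minimal sketch of the rate limiter on its own (QueryValidator needs an embedding model, so it is left out of this hypothetical snippet, which is not part of the upload):

# Hypothetical usage sketch, not part of the uploaded files.
from modules.security import SecurityManager

manager = SecurityManager()
allowed = [manager.check_rate_limit("203.0.113.7") for _ in range(3)]
print(allowed)  # all True while under MAX_REQUESTS_PER_WINDOW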
|
requirements.txt
ADDED
@@ -0,0 +1,43 @@
|
# Core Dependencies
Flask==3.0.0
python-dotenv==1.0.0
Werkzeug==3.0.1
gunicorn==21.2.0
flask-cors==4.0.0
flask-limiter==3.5.0

# AI and Machine Learning
torch==2.2.1
transformers==4.38.2
sentence-transformers==2.5.1
faiss-cpu==1.7.4
numpy==1.26.4
pandas==2.2.1
scikit-learn==1.3.1

# Audio Processing
webrtcvad==2.0.10
SpeechRecognition==3.10.0
pydub==0.25.1
happytransformer==2.4.1

# Location and Geocoding
geopy==2.4.1
geocoder==1.38.1

# Security and Rate Limiting
better-profanity==0.7.0
bleach==6.0.0

# Utilities
python-dateutil==2.8.2
requests==2.31.0
tqdm==4.66.1

# NLP and Text Processing
spacy==3.7.4
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl

# Additional Dependencies
python-Levenshtein==0.23.0
fuzzywuzzy==0.18.0
|
templates/index.html
ADDED
@@ -0,0 +1,1824 @@
|
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>HIVE PROP</title>
    <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap" rel="stylesheet">
    <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
    <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css" rel="stylesheet">
    <style>
        /* Root Variables */
        :root {
            --primary-color: #31511E;
            --secondary-color: #F6FCDF;
            --accent-color: #859F3D;
            --text-primary: rgb(26, 26, 25);
            --text-secondary: rgb(49, 81, 30);
            --border-radius: 20px;
            --box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
            --transition: all 0.3s ease;
            --chat-bg: #F6FCDF;
            --bubble-user: #ffffff;
            --bubble-bot: #31511E;
        }

        /* Property Search Styles */
        body {
            font-family: 'Poppins', sans-serif;
            background-color: #F6FCDF;
            margin: 0;
            padding: 20px;
            color: var(--text-primary);
        }

        h1 {
            text-align: center;
            color: var(--primary-color);
            margin-bottom: 40px;
            font-size: 3rem;
            font-weight: 700;
            text-transform: uppercase;
            letter-spacing: 2px;
            position: relative;
            animation: fadeIn 1s ease-in-out;
        }

        h1::after {
            content: '';
            position: absolute;
            bottom: -10px;
            left: 50%;
            transform: translateX(-50%);
            width: 100px;
            height: 4px;
            background: var(--primary-color);
            border-radius: 2px;
        }

        @keyframes fadeIn {
            from {
                opacity: 0;
                transform: translateY(-20px);
            }
            to {
                opacity: 1
|
66 |
+
transform: translateY(0);
|
67 |
+
}
|
68 |
+
}
|
69 |
+
|
70 |
+
/* Property Search Components */
|
71 |
+
.search-container {
|
72 |
+
display: flex;
|
73 |
+
justify-content: center;
|
74 |
+
gap: 20px;
|
75 |
+
margin-bottom: 40px;
|
76 |
+
}
|
77 |
+
|
78 |
+
#queryForm {
|
79 |
+
display: flex;
|
80 |
+
gap: 20px;
|
81 |
+
align-items: center;
|
82 |
+
}
|
83 |
+
|
84 |
+
#userQuery {
|
85 |
+
width: 400px;
|
86 |
+
padding: 15px 25px;
|
87 |
+
border: 2px solid #E1E8ED;
|
88 |
+
border-radius: var(--border-radius);
|
89 |
+
font-size: 1.1rem;
|
90 |
+
transition: var(--transition);
|
91 |
+
background: white;
|
92 |
+
box-shadow: var(--box-shadow);
|
93 |
+
}
|
94 |
+
|
95 |
+
/* Property Card Styles */
|
96 |
+
.property {
|
97 |
+
display: flex;
|
98 |
+
flex-direction: row;
|
99 |
+
background: white;
|
100 |
+
border-radius: 30px;
|
101 |
+
box-shadow: var(--box-shadow);
|
102 |
+
margin-bottom: 30px;
|
103 |
+
overflow: hidden;
|
104 |
+
width: 90%;
|
105 |
+
margin-left: auto;
|
106 |
+
margin-right: auto;
|
107 |
+
transition: var(--transition);
|
108 |
+
gap: 20px;
|
109 |
+
padding: 20px;
|
110 |
+
animation: slideUp 0.5s ease-out;
|
111 |
+
position: relative;
|
112 |
+
}
|
113 |
+
|
114 |
+
@keyframes slideUp {
|
115 |
+
from {
|
116 |
+
opacity: 0;
|
117 |
+
transform: translateY(20px);
|
118 |
+
}
|
119 |
+
to {
|
120 |
+
opacity: 1;
|
121 |
+
transform: translateY(0);
|
122 |
+
}
|
123 |
+
}
|
124 |
+
|
125 |
+
.image-container {
|
126 |
+
flex: 2;
|
127 |
+
position: relative;
|
128 |
+
overflow: hidden;
|
129 |
+
border-radius: 20px;
|
130 |
+
box-shadow: var(--box-shadow);
|
131 |
+
}
|
132 |
+
|
133 |
+
.carousel {
|
134 |
+
width: 100%;
|
135 |
+
height: 100%;
|
136 |
+
}
|
137 |
+
|
138 |
+
.carousel-images {
|
139 |
+
display: flex;
|
140 |
+
transition: transform 0.5s ease-in-out;
|
141 |
+
height: 100%;
|
142 |
+
}
|
143 |
+
|
144 |
+
.carousel-image {
|
145 |
+
width: 100%;
|
146 |
+
height: 100%;
|
147 |
+
object-fit: cover;
|
148 |
+
flex-shrink: 0;
|
149 |
+
border-radius: 20px;
|
150 |
+
}
|
151 |
+
|
152 |
+
.property-details {
|
153 |
+
flex: 3;
|
154 |
+
display: flex;
|
155 |
+
flex-direction: column;
|
156 |
+
}
|
157 |
+
|
158 |
+
/* Accordion Styles */
|
159 |
+
.accordion-section {
|
160 |
+
border-bottom: 1px solid #E1E8ED;
|
161 |
+
}
|
162 |
+
|
163 |
+
.accordion-header {
|
164 |
+
display: flex;
|
165 |
+
align-items: center;
|
166 |
+
justify-content: space-between;
|
167 |
+
padding: 15px 0;
|
168 |
+
cursor: pointer;
|
169 |
+
border-bottom: 1px solid #E1E8ED;
|
170 |
+
transition: var(--transition);
|
171 |
+
}
|
172 |
+
|
173 |
+
.accordion-header:hover {
|
174 |
+
background-color: var(--secondary-color);
|
175 |
+
border-radius: 10px;
|
176 |
+
}
|
177 |
+
|
178 |
+
.accordion-content {
|
179 |
+
display: none;
|
180 |
+
padding: 15px 0;
|
181 |
+
color: var(--text-secondary);
|
182 |
+
line-height: 1.6;
|
183 |
+
}
|
184 |
+
|
185 |
+
/* Add active class for accordion */
|
186 |
+
.accordion-header.active + .accordion-content {
|
187 |
+
display: block; /* Show content when active */
|
188 |
+
}
|
189 |
+
|
190 |
+
/* Loading Spinner */
|
191 |
+
.loading-spinner {
|
192 |
+
border: 4px solid rgba(74, 144, 226, 0.1);
|
193 |
+
border-left-color: var(--primary-color);
|
194 |
+
border-radius: 50%;
|
195 |
+
width: 40px;
|
196 |
+
height: 40px;
|
197 |
+
animation: spin 1s linear infinite;
|
198 |
+
margin: 40px auto;
|
199 |
+
}
|
200 |
+
|
201 |
+
.loading-message {
|
202 |
+
width: 40px;
|
203 |
+
height: 40px;
|
204 |
+
margin: auto auto;
|
205 |
+
}
|
206 |
+
|
207 |
+
@keyframes spin {
|
208 |
+
0% { transform: rotate(0deg); }
|
209 |
+
100% { transform: rotate(360deg); }
|
210 |
+
}
|
211 |
+
|
212 |
+
.error-message {
|
213 |
+
color: #DC3545;
|
214 |
+
text-align: center;
|
215 |
+
font-weight: 500;
|
216 |
+
margin-top: 20px;
|
217 |
+
}
|
218 |
+
|
219 |
+
@media (max-width: 1024px) {
|
220 |
+
.property {
|
221 |
+
flex-direction: column;
|
222 |
+
width: 95%;
|
223 |
+
}
|
224 |
+
|
225 |
+
.image-container {
|
226 |
+
width: 100%;
|
227 |
+
height: 300px;
|
228 |
+
}
|
229 |
+
|
230 |
+
#userQuery {
|
231 |
+
width: 300px;
|
232 |
+
}
|
233 |
+
}
|
234 |
+
|
235 |
+
@media (max-width: 768px) {
|
236 |
+
.search-container {
|
237 |
+
flex-direction: column;
|
238 |
+
align-items: center;
|
239 |
+
}
|
240 |
+
|
241 |
+
#queryForm {
|
242 |
+
flex-direction: column;
|
243 |
+
width: 100%;
|
244 |
+
max-width: 400px;
|
245 |
+
}
|
246 |
+
|
247 |
+
#userQuery {
|
248 |
+
width: 100%;
|
249 |
+
}
|
250 |
+
|
251 |
+
button {
|
252 |
+
width: 100%;
|
253 |
+
}
|
254 |
+
}
|
255 |
+
|
256 |
+
#userQuery:focus {
|
257 |
+
outline: none;
|
258 |
+
border-color: var(--primary-color);
|
259 |
+
box-shadow: 0 0 0 3px rgba(74, 144, 226, 0.2);
|
260 |
+
}
|
261 |
+
|
262 |
+
#userQuery::placeholder {
|
263 |
+
color: #A4A4A4;
|
264 |
+
font-weight: 300;
|
265 |
+
}
|
266 |
+
|
267 |
+
.property:hover {
|
268 |
+
transform: translateY(-5px);
|
269 |
+
box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1);
|
270 |
+
}
|
271 |
+
|
272 |
+
.carousel-nav {
|
273 |
+
position: absolute;
|
274 |
+
bottom: 20px;
|
275 |
+
left: 50%;
|
276 |
+
transform: translateX(-50%);
|
277 |
+
display: flex;
|
278 |
+
gap: 10px;
|
279 |
+
}
|
280 |
+
|
281 |
+
.carousel-dot {
|
282 |
+
width: 10px;
|
283 |
+
height: 10px;
|
284 |
+
border-radius: 50%;
|
285 |
+
background: rgba(255, 255, 255, 0.5);
|
286 |
+
cursor: pointer;
|
287 |
+
transition: var(--transition);
|
288 |
+
}
|
289 |
+
|
290 |
+
.carousel-dot.active {
|
291 |
+
background: white;
|
292 |
+
}
|
293 |
+
|
294 |
+
.property-header {
|
295 |
+
display: flex;
|
296 |
+
justify-content: space-between;
|
297 |
+
align-items: center;
|
298 |
+
}
|
299 |
+
|
300 |
+
.property-header h2 {
|
301 |
+
font-size: 1.8rem;
|
302 |
+
font-weight: 700;
|
303 |
+
color: var(--text-primary);
|
304 |
+
margin: 0 0 10px 0;
|
305 |
+
}
|
306 |
+
|
307 |
+
.property-type {
|
308 |
+
display: inline-block;
|
309 |
+
padding: 8px 16px;
|
310 |
+
background-color: var(--accent-color);
|
311 |
+
color: white;
|
312 |
+
border-radius: 20px;
|
313 |
+
font-size: 0.9rem;
|
314 |
+
font-weight: 500;
|
315 |
+
}
|
316 |
+
|
317 |
+
.property-info {
|
318 |
+
display: grid;
|
319 |
+
grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
|
320 |
+
gap: 20px;
|
321 |
+
padding: 20px 0;
|
322 |
+
border-top: 1px solid #E1E8ED;
|
323 |
+
border-bottom: 1px solid #E1E8ED;
|
324 |
+
max-height: 300px; /* Set a max height for scrolling */
|
325 |
+
overflow-y: auto; /* Enable vertical scrolling */
|
326 |
+
scrollbar-width: thin; /* For Firefox */
|
327 |
+
scrollbar-color: var(--primary-color) var(--secondary-color); /* For Firefox */
|
328 |
+
}
|
329 |
+
|
330 |
+
.property-info::-webkit-scrollbar {
|
331 |
+
width: 8px; /* Width of the scrollbar */
|
332 |
+
}
|
333 |
+
|
334 |
+
.property-info::-webkit-scrollbar-track {
|
335 |
+
background: var(--secondary-color); /* Color of the scrollbar track */
|
336 |
+
border-radius: 10px;
|
337 |
+
}
|
338 |
+
|
339 |
+
.property-info::-webkit-scrollbar-thumb {
|
340 |
+
background: var(--primary-color); /* Color of the scrollbar thumb */
|
341 |
+
border-radius: 10px;
|
342 |
+
}
|
343 |
+
|
344 |
+
.property-info::-webkit-scrollbar-thumb:hover {
|
345 |
+
background: var(--accent-color); /* Color of the scrollbar thumb on hover */
|
346 |
+
}
|
347 |
+
|
348 |
+
.description {
|
349 |
+
margin: 10px 0;
|
350 |
+
line-height: 1.6;
|
351 |
+
color: var(--text-secondary);
|
352 |
+
}
|
353 |
+
|
354 |
+
.key-features {
|
355 |
+
display: flex;
|
356 |
+
flex-wrap: wrap;
|
357 |
+
gap: 10px;
|
358 |
+
margin: 10px 0;
|
359 |
+
align-items: center;
|
360 |
+
}
|
361 |
+
|
362 |
+
.feature-pill {
|
363 |
+
background-color: var(--secondary-color);
|
364 |
+
padding: 2px 10px;
|
365 |
+
border-radius: 10px;
|
366 |
+
font-size: 0.9rem;
|
367 |
+
color: var(--text-primary);
|
368 |
+
font-weight: 500;
|
369 |
+
transition: var(--transition);
|
370 |
+
border: 2px solid var(--primary-color); /* Added border */
|
371 |
+
}
|
372 |
+
|
373 |
+
.feature-pill:hover {
|
374 |
+
background-color: var(--primary-color);
|
375 |
+
color: white;
|
376 |
+
transform: translateY(-2px);
|
377 |
+
}
|
378 |
+
|
379 |
+
.amenities-card {
|
380 |
+
background-color: var(--secondary-color);
|
381 |
+
border-radius: var(--border-radius);
|
382 |
+
padding: 20px;
|
383 |
+
margin: 10px 0;
|
384 |
+
}
|
385 |
+
|
386 |
+
.amenities-pills {
|
387 |
+
display: flex;
|
388 |
+
flex-wrap: wrap;
|
389 |
+
gap: 10px;
|
390 |
+
margin-top: 10px;
|
391 |
+
}
|
392 |
+
|
393 |
+
.amenity-pill {
|
394 |
+
background-color: var(--secondary-color);
|
395 |
+
padding: 2px 10px;
|
396 |
+
border-radius: 10px;
|
397 |
+
font-size: 0.9rem;
|
398 |
+
color: var(--text-primary);
|
399 |
+
font-weight: 500;
|
400 |
+
transition: var(--transition);
|
401 |
+
border: 2px solid var(--primary-color); /* Added border */
|
402 |
+
}
|
403 |
+
|
404 |
+
.amenity-pill:hover {
|
405 |
+
background-color: var(--primary-color);
|
406 |
+
color: white;
|
407 |
+
transform: translateY(-2px);
|
408 |
+
}
|
409 |
+
|
410 |
+
.construction-status {
|
411 |
+
display: flex;
|
412 |
+
justify-content: space-between;
|
413 |
+
padding-top: 20px;
|
414 |
+
color: var(--text-secondary);
|
415 |
+
}
|
416 |
+
|
417 |
+
.accordion {
|
418 |
+
background-color: var(--secondary-color);
|
419 |
+
border-radius: var(--border-radius);
|
420 |
+
box-shadow: var(--box-shadow);
|
421 |
+
margin-bottom: 20px;
|
422 |
+
overflow: hidden;
|
423 |
+
}
|
424 |
+
|
425 |
+
.accordion-header:hover {
|
426 |
+
}
|
427 |
+
|
428 |
+
.accordion-header .arrow {
|
429 |
+
transition: transform 0.3s ease;
|
430 |
+
}
|
431 |
+
|
432 |
+
.accordion-header.active .arrow {
|
433 |
+
transform: rotate(90deg);
|
434 |
+
}
|
435 |
+
|
436 |
+
.accordion-header strong {
|
437 |
+
font-size: 1.1rem;
|
438 |
+
color: var(--text-primary);
|
439 |
+
display: flex;
|
440 |
+
align-items: center;
|
441 |
+
gap: 10px;
|
442 |
+
}
|
443 |
+
|
444 |
+
.accordion-arrow {
|
445 |
+
transition: transform 0.3s ease;
|
446 |
+
font-size: 0.9rem;
|
447 |
+
color: var(--primary-color);
|
448 |
+
}
|
449 |
+
|
450 |
+
.accordion-arrow.active {
|
451 |
+
transform: rotate(180deg);
|
452 |
+
}
|
453 |
+
|
454 |
+
.accordion-section:last-child {
|
455 |
+
border-bottom: none;
|
456 |
+
}
|
457 |
+
|
458 |
+
.accordion-header.active + .accordion-content {
|
459 |
+
display: block; /* Show content when active */
|
460 |
+
}
|
461 |
+
|
462 |
+
.favorite-button {
|
463 |
+
position: absolute;
|
464 |
+
top: 10px;
|
465 |
+
right: 10px;
|
466 |
+
background: none;
|
467 |
+
border: none;
|
468 |
+
color: white;
|
469 |
+
cursor: pointer;
|
470 |
+
padding: 10px;
|
471 |
+
border-radius: 50px;
|
472 |
+
display: flex;
|
473 |
+
background-color: var(--primary-color);
|
474 |
+
}
|
475 |
+
|
476 |
+
.favorite-button.active {
|
477 |
+
color: var(--primary-color);
|
478 |
+
background-color: var(--accent-color);
|
479 |
+
border: 2px solid var(--primary-color);
|
480 |
+
}
|
481 |
+
|
482 |
+
/* Chat Assistant Styles */
|
483 |
+
.chatbot-container {
|
484 |
+
position: fixed;
|
485 |
+
bottom: 2rem;
|
486 |
+
left: 0;
|
487 |
+
right: 0;
|
488 |
+
display: flex;
|
489 |
+
justify-content: space-between;
|
490 |
+
padding: 0 2rem;
|
491 |
+
z-index: 1000;
|
492 |
+
}
|
493 |
+
|
494 |
+
.chatbot-icon {
|
495 |
+
width: 3.5rem;
|
496 |
+
height: 3.5rem;
|
497 |
+
background: var(--primary-color);
|
498 |
+
border-radius: 50%;
|
499 |
+
display: flex;
|
500 |
+
align-items: center;
|
501 |
+
justify-content: center;
|
502 |
+
cursor: pointer;
|
503 |
+
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
|
504 |
+
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
505 |
+
}
|
506 |
+
|
507 |
+
.chatbot-icon:hover {
|
508 |
+
transform: scale(1.1);
|
509 |
+
box-shadow: 0 6px 16px rgba(0, 0, 0, 0.2);
|
510 |
+
}
|
511 |
+
|
512 |
+
.chatbot-icon i {
|
513 |
+
color: white;
|
514 |
+
font-size: 1.5rem;
|
515 |
+
}
|
516 |
+
|
517 |
+
.chat-container {
|
518 |
+
position: fixed;
|
519 |
+
bottom: 6rem;
|
520 |
+
width: 450px;
|
521 |
+
height: 35rem;
|
522 |
+
background: var(--chat-bg);
|
523 |
+
border-radius: 1rem;
|
524 |
+
box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
|
525 |
+
display: none;
|
526 |
+
flex-direction: column;
|
527 |
+
overflow: hidden;
|
528 |
+
z-index: 999;
|
529 |
+
animation: slideUp 0.3s ease-out;
|
530 |
+
border: 1px solid var(--primary-color); /* Added border */
|
531 |
+
}
|
532 |
+
|
533 |
+
.chat-header {
|
534 |
+
background: var(--primary-color);
|
535 |
+
color: white;
|
536 |
+
padding: 1rem;
|
537 |
+
display: flex;
|
538 |
+
align-items: center;
|
539 |
+
gap: 0.5rem;
|
540 |
+
position: relative;
|
541 |
+
border-top-left-radius: 1rem;
|
542 |
+
border-top-right-radius: 1rem;
|
543 |
+
}
|
544 |
+
|
545 |
+
.chat-header img {
|
546 |
+
width: 2rem;
|
547 |
+
height: 2rem;
|
548 |
+
border-radius: 50%;
|
549 |
+
}
|
550 |
+
|
551 |
+
.chat-header .close-button {
|
552 |
+
position: absolute;
|
553 |
+
right: 1rem;
|
554 |
+
background: none;
|
555 |
+
border: none;
|
556 |
+
font-size: 1.5rem;
|
557 |
+
color: white;
|
558 |
+
cursor: pointer;
|
559 |
+
}
|
560 |
+
|
561 |
+
/* Custom Scrollbar for Chatbot */
|
562 |
+
.chat-body::-webkit-scrollbar {
|
563 |
+
width: 12px; /* Width of the scrollbar */
|
564 |
+
}
|
565 |
+
|
566 |
+
.chat-body::-webkit-scrollbar-track {
|
567 |
+
background: var(--secondary-color); /* Color of the scrollbar track */
|
568 |
+
border-radius: 20px; /* Rounded corners for the track */
|
569 |
+
border: 2px solid var(--primary-color); /* Border around the track */
|
570 |
+
}
|
571 |
+
|
572 |
+
.chat-body::-webkit-scrollbar-thumb {
|
573 |
+
background: var(--primary-color); /* Color of the scrollbar thumb */
|
574 |
+
border-radius: 50%; /* Rounded corners for the thumb */
|
575 |
+
border: 2px solid var(--secondary-color); /* Border around the thumb */
|
576 |
+
}
|
577 |
+
|
578 |
+
.chat-body::-webkit-scrollbar-thumb:hover {
|
579 |
+
background: var(--accent-color); /* Color of the scrollbar thumb on hover */
|
580 |
+
}
|
581 |
+
|
582 |
+
/* For Firefox */
|
583 |
+
.chat-body {
|
584 |
+
scrollbar-width: thin; /* Width of the scrollbar */
|
585 |
+
scrollbar-color: var(--primary-color) var(--secondary-color); /* Color of the scrollbar thumb and track */
|
586 |
+
flex: 1;
|
587 |
+
padding: 1rem;
|
588 |
+
overflow-y: auto;
|
589 |
+
background: var(--chat-bg); /* Background of the chat body */
|
590 |
+
}
|
591 |
+
|
592 |
+
.message {
|
593 |
+
display: flex;
|
594 |
+
flex-direction: column;
|
595 |
+
margin-bottom: 1rem;
|
596 |
+
}
|
597 |
+
|
598 |
+
.message-content {
|
599 |
+
max-width: 80%;
|
600 |
+
padding: 0.75rem 1rem;
|
601 |
+
border-radius: 1rem;
|
602 |
+
position: relative;
|
603 |
+
animation: messageAppear 0.3s ease-out;
|
604 |
+
}
|
605 |
+
|
606 |
+
@keyframes messageAppear {
|
607 |
+
from {
|
608 |
+
opacity: 0;
|
609 |
+
transform: translateY(10px);
|
610 |
+
}
|
611 |
+
to {
|
612 |
+
opacity: 1;
|
613 |
+
transform: translateY(0);
|
614 |
+
}
|
615 |
+
}
|
616 |
+
|
617 |
+
.user-message {
|
618 |
+
align-items: flex-end;
|
619 |
+
}
|
620 |
+
|
621 |
+
.bot-message {
|
622 |
+
align-items: flex-start;
|
623 |
+
}
|
624 |
+
|
625 |
+
.user-message .message-content {
|
626 |
+
background: var(--bubble-user);
|
627 |
+
color: var(--text-primary);
|
628 |
+
border-bottom-right-radius: 0.25rem;
|
629 |
+
}
|
630 |
+
|
631 |
+
.bot-message .message-content {
|
632 |
+
background: var(--bubble-bot);
|
633 |
+
color: white;
|
634 |
+
border-bottom-left-radius: 0.25rem;
|
635 |
+
}
|
636 |
+
|
637 |
+
.typing-indicator {
|
638 |
+
display: none;
|
639 |
+
padding: 0.75rem 1rem;
|
640 |
+
background: var(--bubble-bot);
|
641 |
+
color: white;
|
642 |
+
border-radius: 1rem;
|
643 |
+
border-bottom-left-radius: 0.25rem;
|
644 |
+
margin-bottom: 1rem;
|
645 |
+
width: fit-content;
|
646 |
+
}
|
647 |
+
|
648 |
+
.typing-dot {
|
649 |
+
display: inline-block;
|
650 |
+
width: 0.5rem;
|
651 |
+
height: 0.5rem;
|
652 |
+
margin: 0 0.1rem;
|
653 |
+
background: white;
|
654 |
+
border-radius: 50%;
|
655 |
+
animation: typing 1.4s infinite ease-in-out;
|
656 |
+
}
|
657 |
+
|
658 |
+
.typing-dot:nth-child(1) { animation-delay: 200ms; }
|
659 |
+
.typing-dot:nth-child(2) { animation-delay: 300ms; }
|
660 |
+
.typing-dot:nth-child(3) { animation-delay: 400ms; }
|
661 |
+
|
662 |
+
@keyframes typing {
|
663 |
+
0%, 60%, 100% { transform: translateY(0); }
|
664 |
+
30% { transform: translateY(-6px); }
|
665 |
+
}
|
666 |
+
|
667 |
+
.chat-footer {
|
668 |
+
padding: 1rem;
|
669 |
+
background: var(--chat-bg);
|
670 |
+
border-top: 1px solid rgba(0, 0, 0, 0.1);
|
671 |
+
}
|
672 |
+
|
673 |
+
.input-group {
|
674 |
+
display: flex;
|
675 |
+
gap: 0.5rem;
|
676 |
+
align-items: center;
|
677 |
+
}
|
678 |
+
|
679 |
+
.chat-input {
|
680 |
+
flex: 1;
|
681 |
+
padding: 0.75rem 1rem;
|
682 |
+
border: 1px solid rgba(0, 0, 0, 0.1);
|
683 |
+
border-radius: 1.5rem;
|
684 |
+
outline: none;
|
685 |
+
font-size: 0.95rem;
|
686 |
+
transition: border-color 0.3s ease;
|
687 |
+
}
|
688 |
+
|
689 |
+
.chat-input:focus {
|
690 |
+
border-color: var(--primary-color);
|
691 |
+
}
|
692 |
+
|
693 |
+
.send-chatbutton {
|
694 |
+
background: var(--bubble-bot);
|
695 |
+
color: white;
|
696 |
+
border: 2px solid var(--primary-color); /* Added border */
|
697 |
+
width: 2.5rem;
|
698 |
+
height: 2.5rem;
|
699 |
+
border-radius: 50%;
|
700 |
+
cursor: pointer;
|
701 |
+
display: flex;
|
702 |
+
align-items: center;
|
703 |
+
justify-content: center;
|
704 |
+
transition: background-color 0.3s ease;
|
705 |
+
border-radius: 20px;
|
706 |
+
}
|
707 |
+
|
708 |
+
.send-chatbutton:hover {
|
709 |
+
background: var(--accent-color);
|
710 |
+
}
|
711 |
+
|
712 |
+
.timestamp {
|
713 |
+
font-size: 0.75rem;
|
714 |
+
color: #64748b;
|
715 |
+
margin-top: 0.25rem;
|
716 |
+
}
|
717 |
+
|
718 |
+
.property-card {
|
719 |
+
background: #f8fafc;
|
720 |
+
border-radius: 0.5rem;
|
721 |
+
padding: 1rem;
|
722 |
+
margin: 0.5rem 0;
|
723 |
+
border-left: 4px solid var(--primary-color);
|
724 |
+
box-shadow: var(--box-shadow);
|
725 |
+
}
|
726 |
+
|
727 |
+
.property-name {
|
728 |
+
font-weight: bold;
|
729 |
+
color: var(--primary-color);
|
730 |
+
margin-bottom: 0.5rem;
|
731 |
+
}
|
732 |
+
|
733 |
+
button {
|
734 |
+
padding: 15px 30px;
|
735 |
+
background-color: var(--primary-color);
|
736 |
+
color: white;
|
737 |
+
border: none;
|
738 |
+
border-radius: var(--border-radius);
|
739 |
+
font-size: 1.1rem;
|
740 |
+
font-weight: 600;
|
741 |
+
cursor: pointer;
|
742 |
+
transition: var(--transition);
|
743 |
+
font-family: 'Poppins', sans-serif;
|
744 |
+
box-shadow: var(--box-shadow);
|
745 |
+
}
|
746 |
+
|
747 |
+
button:hover {
|
748 |
+
background-color: #859F3D;
|
749 |
+
transform: translateY(-2px);
|
750 |
+
box-shadow: var(--box-shadow);
|
751 |
+
}
|
752 |
+
|
753 |
+
.quick-keywords::-webkit-scrollbar {
|
754 |
+
display: none;
|
755 |
+
}
|
756 |
+
|
757 |
+
.quick-keyword {
|
758 |
+
flex: 0 0 auto; /* Prevent buttons from shrinking */
|
759 |
+
padding: 10px 15px;
|
760 |
+
border-radius: 50px;
|
761 |
+
border: none;
|
762 |
+
color: white;
|
763 |
+
cursor: pointer;
|
764 |
+
white-space: nowrap;
|
765 |
+
}
|
766 |
+
|
767 |
+
.plan-selector-container {
|
768 |
+
display: flex;
|
769 |
+
justify-content: center;
|
770 |
+
margin-bottom: 20px;
|
771 |
+
}
|
772 |
+
|
773 |
+
.plan-selector {
|
774 |
+
padding: 10px 20px;
|
775 |
+
border: 2px solid var(--primary-color);
|
776 |
+
border-radius: var(--border-radius);
|
777 |
+
background-color: white;
|
778 |
+
color: var(--text-primary);
|
779 |
+
font-family: 'Poppins', sans-serif;
|
780 |
+
font-size: 1rem;
|
781 |
+
cursor: pointer;
|
782 |
+
transition: var(--transition);
|
783 |
+
width: 200px;
|
784 |
+
text-align: center;
|
785 |
+
}
|
786 |
+
|
787 |
+
.plan-selector:hover {
|
788 |
+
border-color: var(--accent-color);
|
789 |
+
}
|
790 |
+
|
791 |
+
.plan-selector:focus {
|
792 |
+
outline: none;
|
793 |
+
border-color: var(--primary-color);
|
794 |
+
box-shadow: 0 0 0 3px rgba(74, 144, 226, 0.2);
|
795 |
+
}
|
796 |
+
</style>
|
797 |
+
</head>
|
798 |
+
<body>
|
799 |
+
<h1>HIVE PROP</h1>
|
800 |
+
|
801 |
+
<!-- Property Search Section -->
|
802 |
+
<div class="search-container">
|
803 |
+
<form id="queryForm">
|
804 |
+
<input type="text" id="userQuery" placeholder="Search for your dream property..." required>
|
805 |
+
<button type="button" id="microphoneButton">
|
806 |
+
<i class="fas fa-microphone"></i>
|
807 |
+
</button>
|
808 |
+
<button type="submit">Search Properties</button>
|
809 |
+
</form>
|
810 |
+
</div>
|
811 |
+
<div class="plan-selector-container">
|
812 |
+
<select id="planSelector" class="plan-selector">
|
813 |
+
<option value="basic" selected>Basic Plan</option>
|
814 |
+
<option value="plus">Plus Plan</option>
|
815 |
+
<option value="pro">Pro Plan</option>
|
816 |
+
</select>
|
817 |
+
</div>
|
818 |
+
<div id="results"></div>
|
819 |
+
<div id="errorMessage" class="error-message"></div>
|
820 |
+
<div id="loadingMessage" style="display: none;">
|
821 |
+
<div class="loading-spinner"></div>
|
822 |
+
<!--<div class="loading-message">Loading...</div>-->
|
823 |
+
</div>
|
824 |
+
<div id="listeningMessage" style="display: none;">
|
825 |
+
<div class="loading-spinner"></div>
|
826 |
+
<div class="loading-message">Listening...</div>
|
827 |
+
</div>
|
828 |
+
|
829 |
+
<!-- Chat Assistant and Recommendations Container -->
|
830 |
+
<div class="chatbot-container">
|
831 |
+
<!-- Chat Assistant Icon -->
|
832 |
+
<div class="chatbot-icon" id="chatbot-icon" style="background-color: #31511E;">
|
833 |
+
<i class="fas fa-comment"></i>
|
834 |
+
</div>
|
835 |
+
|
836 |
+
<!-- Recommendations Icon -->
|
837 |
+
<div class="chatbot-icon" id="recommend-icon" style="background-color: #859F3D;">
|
838 |
+
<i class="fas fa-search"></i>
|
839 |
+
</div>
|
840 |
+
</div>
|
841 |
+
|
842 |
+
<!-- Chat Assistant Container -->
|
843 |
+
<div class="chat-container" id="chat-container" style="left: 2rem;">
|
844 |
+
<div class="chat-header">
|
845 |
+
<img src="/api/placeholder/32/32" alt="Bot Avatar">
|
846 |
+
<span>Hive Prop Chat Bot</span>
|
847 |
+
<button class="close-button">
|
848 |
+
<i class="fas fa-times"></i>
|
849 |
+
</button>
|
850 |
+
</div>
|
851 |
+
|
852 |
+
<div class="chat-body" id="chat-body">
|
853 |
+
<div class="message bot-message">
|
854 |
+
<div class="message-content">
|
855 |
+
Hello! I'm your real estate recommendation assistant. How can I help you today? Please reply with 'hi' so I can access your location.
|
856 |
+
</div>
|
857 |
+
<div class="timestamp">Now</div>
|
858 |
+
</div>
|
859 |
+
<div class="typing-indicator" id="typing-indicator">
|
860 |
+
<div class="typing-dot"></div>
|
861 |
+
<div class="typing-dot"></div>
|
862 |
+
<div class="typing-dot"></div>
|
863 |
+
</div>
|
864 |
+
</div>
|
865 |
+
|
866 |
+
<div class="chat-footer">
|
867 |
+
<div class="input-group">
|
868 |
+
<input type="text" class="chat-input" id="user-input" placeholder="Type your message..." required>
|
869 |
+
<button class="send-chatbutton" id="send-button">
|
870 |
+
<i class="fas fa-paper-plane"></i>
|
871 |
+
</button>
|
872 |
+
<button type="button" style="padding: 10px 15px;border-radius: 100px;" id="chatMicrophoneButton">
|
873 |
+
<i class="fas fa-microphone"></i>
|
874 |
+
</button>
|
875 |
+
</div>
|
876 |
+
<div class="quick-keywords">
|
877 |
+
<button class="quick-keyword" data-message="Show me nearby properties">Nearby Properties</button>
|
878 |
+
<button class="quick-keyword" data-message="Find luxury homes">Luxury Homes</button>
|
879 |
+
<button class="quick-keyword" data-message="Affordable apartments">Affordable Apartments</button>
|
880 |
+
<button class="quick-keyword" data-message="Properties with pools">Properties with Pools</button>
|
881 |
+
</div>
|
882 |
+
</div>
|
883 |
+
</div>
|
884 |
+
|
885 |
+
<!-- Recommendations Container -->
|
886 |
+
<div class="chat-container" id="recommend-container" style="right: 2rem;">
|
887 |
+
<div class="chat-header">
|
888 |
+
<img src="/content/sample_data/hive_prop.jpg" alt="Bot Avatar">
|
889 |
+
<span>Hive Prop Recommendations</span>
|
890 |
+
<button class="close-button">
|
891 |
+
<i class="fas fa-times"></i>
|
892 |
+
</button>
|
893 |
+
</div>
|
894 |
+
|
895 |
+
<div class="chat-body" id="recommend-body">
|
896 |
+
<div class="message bot-message">
|
897 |
+
<div class="message-content">
|
898 |
+
Hello! I'm your real estate recommendation assistant. How can I help you today? Please reply with 'hi' so I can access your location.
|
899 |
+
</div>
|
900 |
+
<div class="timestamp">Now</div>
|
901 |
+
</div>
|
902 |
+
<div class="typing-indicator" id="recommend-typing-indicator">
|
903 |
+
<div class="typing-dot"></div>
|
904 |
+
<div class="typing-dot"></div>
|
905 |
+
<div class="typing-dot"></div>
|
906 |
+
</div>
|
907 |
+
</div>
|
908 |
+
|
909 |
+
<div class="chat-footer">
|
910 |
+
<div class="input-group">
|
911 |
+
<input type="text" class="chat-input" id="recommend-input" placeholder="Type your message..." required>
|
912 |
+
<button class="send-chatbutton" id="recommend-send-button">
|
913 |
+
<i class="fas fa-paper-plane"></i>
|
914 |
+
</button>
|
915 |
+
<button type="button" style="padding: 10px 15px;border-radius: 100px;" id="recommendMicrophoneButton">
|
916 |
+
<i class="fas fa-microphone"></i>
|
917 |
+
</button>
|
918 |
+
</div>
|
919 |
+
<div class="quick-keywords" style="
|
920 |
+
border-radius:50px;
|
921 |
+
display: flex;
|
922 |
+
flex-direction: row;
|
923 |
+
overflow-x: auto;
|
924 |
+
white-space: nowrap;
|
925 |
+
gap: 10px;
|
926 |
+
padding: 10px;
|
927 |
+
scrollbar-width: none; /* Hide scrollbar for Firefox */
|
928 |
+
-ms-overflow-style: none; /* Hide scrollbar for IE/Edge */
|
929 |
+
position: relative;
|
930 |
+
background: white;
|
931 |
+
box-shadow: inset 20px 0 20px -10px rgba(0, 0, 0, 0.5), inset -20px 0 20px -10px rgba(0, 0, 0, 0.5);
|
932 |
+
">
|
933 |
+
<button class="quick-keyword" data-message="Show me nearby properties">Nearby Properties</button>
|
934 |
+
<button class="quick-keyword" data-message="Find luxury homes">Luxury Homes</button>
|
935 |
+
<button class="quick-keyword" data-message="Affordable apartments">Affordable Apartments</button>
|
936 |
+
<button class="quick-keyword" data-message="Properties with pools">Properties with Pools</button>
|
937 |
+
<button class="quick-keyword" data-message="Pet-friendly rentals">Pet-friendly Rentals</button>
|
938 |
+
<button class="quick-keyword" data-message="Beachfront properties">Beachfront Properties</button>
|
939 |
+
</div>
|
940 |
+
</div>
|
941 |
+
</div>
|
942 |
+
|
943 |
+
<script>
|
944 |
+
// Property Search Scripts
|
945 |
+
$(document).ready(function() {
|
946 |
+
$('#queryForm').on('submit', function(event) {
|
947 |
+
event.preventDefault();
|
948 |
+
const query = $('#userQuery').val();
|
949 |
+
$('#results').empty();
|
950 |
+
$('#errorMessage').empty();
|
951 |
+
$('#loadingMessage').show();
|
952 |
+
|
953 |
+
$.ajax({
|
954 |
+
url: '/search',
|
955 |
+
type: 'POST',
|
956 |
+
contentType: 'application/json',
|
957 |
+
data: JSON.stringify({
|
958 |
+
query: query,
|
959 |
+
user_plan: plan
|
960 |
+
}),
|
961 |
+
success: function(data) {
|
962 |
+
$('#loadingMessage').hide();
|
963 |
+
if (data.error) {
|
964 |
+
$('#errorMessage').text('Error: ' + data.error);
|
965 |
+
} else {
|
966 |
+
displayProperties(data, plan);
|
967 |
+
}
|
968 |
+
},
|
969 |
+
error: function() {
|
970 |
+
$('#loadingMessage').hide();
|
971 |
+
$('#errorMessage').text('Error: Unable to fetch data. Please try again later.');
|
972 |
+
}
|
973 |
+
});
|
974 |
+
});
|
975 |
+
|
976 |
+
$('#queryForm').on('submit', function() {
|
977 |
+
$('html, body').animate({
|
978 |
+
scrollTop: $('#results').offset().top - 20
|
979 |
+
}, 1000);
|
980 |
+
});
|
981 |
+
|
982 |
+
$('#userQuery').on('focus', function() {
|
983 |
+
$(this).parent().addClass('focused');
|
984 |
+
}).on('blur', function() {
|
985 |
+
$(this).parent().removeClass('focused');
|
986 |
+
});
|
987 |
+
|
988 |
+
$(window).on('resize', function() {
|
989 |
+
if ($(window).width() <= 768) {
|
990 |
+
$('.property').addClass('mobile-view');
|
991 |
+
} else {
|
992 |
+
$('.property').removeClass('mobile-view');
|
993 |
+
}
|
994 |
+
});
|
995 |
+
|
996 |
+
if ($.fn.tooltip) $('[data-tooltip]').each(function() { // tooltip plugin (e.g. Bootstrap) is optional
|
997 |
+
$(this).tooltip({
|
998 |
+
placement: 'top',
|
999 |
+
title: $(this).data('tooltip')
|
1000 |
+
});
|
1001 |
+
});
|
1002 |
+
|
1003 |
+
if ('IntersectionObserver' in window) {
|
1004 |
+
const imageObserver = new IntersectionObserver((entries, observer) => {
|
1005 |
+
entries.forEach(entry => {
|
1006 |
+
if (entry.isIntersecting) {
|
1007 |
+
const img = entry.target;
|
1008 |
+
img.src = img.dataset.src;
|
1009 |
+
img.removeAttribute('data-src');
|
1010 |
+
observer.unobserve(img);
|
1011 |
+
}
|
1012 |
+
});
|
1013 |
+
});
|
1014 |
+
|
1015 |
+
document.querySelectorAll('img[data-src]').forEach(img => {
|
1016 |
+
imageObserver.observe(img);
|
1017 |
+
});
|
1018 |
+
}
|
1019 |
+
|
1020 |
+
const microphoneButton = document.getElementById("microphoneButton");
|
1021 |
+
const recognizer = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
|
1022 |
+
recognizer.lang = 'en-US';
|
1023 |
+
recognizer.continuous = true;
|
1024 |
+
recognizer.interimResults = true;
|
1025 |
+
|
1026 |
+
let silenceTimer = null;
|
1027 |
+
const SILENCE_DURATION = 3000;
|
1028 |
+
|
1029 |
+
microphoneButton.addEventListener("click", () => {
|
1030 |
+
microphoneButton.classList.add('listening');
|
1031 |
+
$('#listeningMessage').show();
|
1032 |
+
recognizer.start();
|
1033 |
+
console.log("Listening...");
|
1034 |
+
});
|
1035 |
+
|
1036 |
+
recognizer.onresult = function(event) {
|
1037 |
+
clearTimeout(silenceTimer);
|
1038 |
+
|
1039 |
+
let finalTranscript = '';
|
1040 |
+
for (let i = event.resultIndex; i < event.results.length; i++) {
|
1041 |
+
if (event.results[i].isFinal) {
|
1042 |
+
finalTranscript += event.results[i][0].transcript;
|
1043 |
+
}
|
1044 |
+
}
|
1045 |
+
|
1046 |
+
if (finalTranscript) {
|
1047 |
+
$('#userQuery').val(finalTranscript);
|
1048 |
+
console.log("Transcript: ", finalTranscript);
|
1049 |
+
}
|
1050 |
+
|
1051 |
+
silenceTimer = setTimeout(() => {
|
1052 |
+
recognizer.stop();
|
1053 |
+
console.log("Stopped listening due to silence");
|
1054 |
+
}, SILENCE_DURATION);
|
1055 |
+
};
|
1056 |
+
|
1057 |
+
recognizer.onend = function() {
|
1058 |
+
console.log("Speech recognition service disconnected");
|
1059 |
+
$('#listeningMessage').hide();
|
1060 |
+
microphoneButton.classList.remove('listening');
|
1061 |
+
clearTimeout(silenceTimer);
|
1062 |
+
|
1063 |
+
if ($('#userQuery').val().trim()) {
|
1064 |
+
$('#queryForm').submit();
|
1065 |
+
}
|
1066 |
+
};
|
1067 |
+
|
1068 |
+
recognizer.onerror = function(event) {
|
1069 |
+
console.error("Speech recognition error", event.error);
|
1070 |
+
$('#listeningMessage').hide();
|
1071 |
+
microphoneButton.classList.remove('listening');
|
1072 |
+
clearTimeout(silenceTimer);
|
1073 |
+
};
|
1074 |
+
|
1075 |
+
document.getElementById("chatbot-icon").addEventListener("click", function() {
|
1076 |
+
const chatContainer = document.getElementById("chat-container");
|
1077 |
+
chatContainer.style.display = chatContainer.style.display === "none" || chatContainer.style.display === "" ? "flex" : "none";
|
1078 |
+
});
|
1079 |
+
|
1080 |
+
const chatBody = document.getElementById("chat-body");
|
1081 |
+
const userInput = document.getElementById("user-input");
|
1082 |
+
const sendButton = document.getElementById("send-button");
|
1083 |
+
const typingIndicator = document.getElementById("typing-indicator");
|
1084 |
+
|
1085 |
+
function addMessage(content, isUser = false) {
|
1086 |
+
const messageDiv = document.createElement("div");
|
1087 |
+
messageDiv.className = `message ${isUser ? "user-message" : "bot-message"}`;
|
1088 |
+
|
1089 |
+
const messageContent = document.createElement("div");
|
1090 |
+
messageContent.className = "message-content";
|
1091 |
+
messageContent.innerHTML = formatMessageContent(content);
|
1092 |
+
|
1093 |
+
const timestamp = document.createElement("div");
|
1094 |
+
timestamp.className = "timestamp";
|
1095 |
+
timestamp.textContent = new Date().toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" });
|
1096 |
+
|
1097 |
+
messageDiv.appendChild(messageContent);
|
1098 |
+
messageDiv.appendChild(timestamp);
|
1099 |
+
|
1100 |
+
chatBody.insertBefore(messageDiv, typingIndicator);
|
1101 |
+
chatBody.scrollTop = chatBody.scrollHeight;
|
1102 |
+
}
|
1103 |
+
|
1104 |
+
function handleUserInput() {
|
1105 |
+
const message = userInput.value.trim().toLowerCase();
|
1106 |
+
if (!message) return;
|
1107 |
+
|
1108 |
+
addMessage(message, true);
|
1109 |
+
userInput.value = "";
|
1110 |
+
typingIndicator.style.display = "block";
|
1111 |
+
|
1112 |
+
if (message === 'hi') {
|
1113 |
+
if (navigator.geolocation) {
|
1114 |
+
navigator.geolocation.getCurrentPosition(function(position) {
|
1115 |
+
const latitude = position.coords.latitude;
|
1116 |
+
const longitude = position.coords.longitude;
|
1117 |
+
console.log("User Location:", latitude, longitude);
|
1118 |
+
|
1119 |
+
// First set the location
|
1120 |
+
const locationData = {
|
1121 |
+
latitude: latitude,
|
1122 |
+
longitude: longitude,
|
1123 |
+
session_id: 'chat-session',
|
1124 |
+
user_plan: plan
|
1125 |
+
};
|
1126 |
+
|
1127 |
+
fetch("/set-location", {
|
1128 |
+
method: "POST",
|
1129 |
+
headers: {
|
1130 |
+
"Content-Type": "application/json",
|
1131 |
+
},
|
1132 |
+
body: JSON.stringify(locationData)
|
1133 |
+
})
|
1134 |
+
.then(response => response.json())
|
1135 |
+
.then(data => {
|
1136 |
+
console.log("Location set:", data);
|
1137 |
+
if (data.error) {
|
1138 |
+
addMessage("Error setting location: " + data.error);
|
1139 |
+
} else {
|
1140 |
+
const city = data.city || 'Unknown city';
|
1141 |
+
const state = data.state || 'Unknown state';
|
1142 |
+
const country = data.country || 'Unknown country';
|
1143 |
+
addMessage(`Location set successfully in ${city}, ${state}, ${country}. Would you like to see nearby properties? Please reply with 'yes' or 'no'.`);
|
1144 |
+
}
|
1145 |
+
typingIndicator.style.display = "none";
|
1146 |
+
})
|
1147 |
+
.catch(error => {
|
1148 |
+
console.error("Error setting location:", error);
|
1149 |
+
addMessage("Unable to access your location. Please try again.");
|
1150 |
+
typingIndicator.style.display = "none";
|
1151 |
+
});
|
1152 |
+
}, function(error) {
|
1153 |
+
console.error("Error getting user location:", error);
|
1154 |
+
addMessage("Unable to access your location. Please try again.");
|
1155 |
+
typingIndicator.style.display = "none";
|
1156 |
+
});
|
1157 |
+
} else {
|
1158 |
+
console.error("Geolocation is not supported by this browser.");
|
1159 |
+
addMessage("Geolocation is not supported by your browser.");
|
1160 |
+
typingIndicator.style.display = "none";
|
1161 |
+
}
|
1162 |
+
} else {
|
1163 |
+
// Handle other messages with plan
|
1164 |
+
const fetchOptions = {
|
1165 |
+
method: "POST",
|
1166 |
+
headers: {
|
1167 |
+
"Content-Type": "application/json",
|
1168 |
+
},
|
1169 |
+
body: JSON.stringify({
|
1170 |
+
query: message,
|
1171 |
+
session_id: 'chat-session',
|
1172 |
+
user_plan: plan
|
1173 |
+
})
|
1174 |
+
};
|
1175 |
+
|
1176 |
+
const endpoint = message === 'yes' ? '/recommend' : '/generate';
|
1177 |
+
|
1178 |
+
fetch(endpoint, fetchOptions)
|
1179 |
+
.then(response => response.json())
|
1180 |
+
.then(data => {
|
1181 |
+
if (message === 'yes' && data.properties) {
|
1182 |
+
let propertiesMessage = "Here are the 5 nearest properties to your location:\n";
|
1183 |
+
data.properties.forEach(property => {
|
1184 |
+
propertiesMessage += `**${property.PropertyName}** at ${property.Address}, ${property.City}\n`;
|
1185 |
+
propertiesMessage += ` Type: ${property.PropertyType}\n`;
|
1186 |
+
propertiesMessage += ` (Distance: ${property.Distance} miles)\n\n`;
|
1187 |
+
});
|
1188 |
+
addMessage(propertiesMessage);
|
1189 |
+
} else {
|
1190 |
+
addMessage(data.response);
|
1191 |
+
}
|
1192 |
+
typingIndicator.style.display = "none";
|
1193 |
+
})
|
1194 |
+
.catch(error => {
|
1195 |
+
console.error("Error:", error);
|
1196 |
+
addMessage("I apologize, but I encountered an error. Please try again.");
|
1197 |
+
typingIndicator.style.display = "none";
|
1198 |
+
});
|
1199 |
+
}
|
1200 |
+
}
|
1201 |
+
|
1202 |
+
sendButton.addEventListener("click", handleUserInput);
|
1203 |
+
userInput.addEventListener("keypress", function(event) {
|
1204 |
+
if (event.key === "Enter") {
|
1205 |
+
handleUserInput();
|
1206 |
+
}
|
1207 |
+
});
|
1208 |
+
|
1209 |
+
const chatMicrophoneButton = document.getElementById("chatMicrophoneButton");
|
1210 |
+
const chatRecognizer = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
|
1211 |
+
chatRecognizer.lang = 'en-US';
|
1212 |
+
chatRecognizer.continuous = true;
|
1213 |
+
chatRecognizer.interimResults = true;
|
1214 |
+
|
1215 |
+
let chatSilenceTimer = null;
|
1216 |
+
|
1217 |
+
chatMicrophoneButton.addEventListener("click", () => {
|
1218 |
+
chatMicrophoneButton.classList.add('listening');
|
1219 |
+
$('#listeningMessage').show();
|
1220 |
+
chatRecognizer.start();
|
1221 |
+
console.log("Chat listening...");
|
1222 |
+
});
|
1223 |
+
|
1224 |
+
chatRecognizer.onresult = function(event) {
|
1225 |
+
clearTimeout(chatSilenceTimer);
|
1226 |
+
|
1227 |
+
let finalTranscript = '';
|
1228 |
+
for (let i = event.resultIndex; i < event.results.length; i++) {
|
1229 |
+
if (event.results[i].isFinal) {
|
1230 |
+
finalTranscript += event.results[i][0].transcript;
|
1231 |
+
}
|
1232 |
+
}
|
1233 |
+
|
1234 |
+
if (finalTranscript) {
|
1235 |
+
userInput.value = finalTranscript;
|
1236 |
+
console.log("Chat Transcript: ", finalTranscript);
|
1237 |
+
}
|
1238 |
+
|
1239 |
+
chatSilenceTimer = setTimeout(() => {
|
1240 |
+
chatRecognizer.stop();
|
1241 |
+
console.log("Stopped chat listening due to silence");
|
1242 |
+
}, SILENCE_DURATION);
|
1243 |
+
};
|
1244 |
+
|
1245 |
+
chatRecognizer.onend = function() {
|
1246 |
+
console.log("Chat speech recognition service disconnected");
|
1247 |
+
$('#listeningMessage').hide();
|
1248 |
+
chatMicrophoneButton.classList.remove('listening');
|
1249 |
+
clearTimeout(chatSilenceTimer);
|
1250 |
+
|
1251 |
+
if (userInput.value.trim()) {
|
1252 |
+
handleUserInput();
|
1253 |
+
}
|
1254 |
+
};
|
1255 |
+
|
1256 |
+
chatRecognizer.onerror = function(event) {
|
1257 |
+
console.error("Chat speech recognition error", event.error);
|
1258 |
+
$('#listeningMessage').hide();
|
1259 |
+
chatMicrophoneButton.classList.remove('listening');
|
1260 |
+
clearTimeout(chatSilenceTimer);
|
1261 |
+
};
|
1262 |
+
|
1263 |
+
document.getElementById("recommend-icon").addEventListener("click", function() {
|
1264 |
+
const recommendContainer = document.getElementById("recommend-container");
|
1265 |
+
recommendContainer.style.display = recommendContainer.style.display === "none" || recommendContainer.style.display === "" ? "flex" : "none";
|
1266 |
+
});
|
1267 |
+
|
1268 |
+
const recommendBody = document.getElementById("recommend-body");
|
1269 |
+
const recommendInput = document.getElementById("recommend-input");
|
1270 |
+
const recommendSendButton = document.getElementById("recommend-send-button");
|
1271 |
+
const recommendTypingIndicator = document.getElementById("recommend-typing-indicator");
|
1272 |
+
|
1273 |
+
function addRecommendMessage(content, isUser = false) {
|
1274 |
+
const messageDiv = document.createElement("div");
|
1275 |
+
messageDiv.className = `message ${isUser ? "user-message" : "bot-message"}`;
|
1276 |
+
|
1277 |
+
const messageContent = document.createElement("div");
|
1278 |
+
messageContent.className = "message-content";
|
1279 |
+
messageContent.innerHTML = formatMessageContent(content);
|
1280 |
+
|
1281 |
+
const timestamp = document.createElement("div");
|
1282 |
+
timestamp.className = "timestamp";
|
1283 |
+
timestamp.textContent = new Date().toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" });
|
1284 |
+
|
1285 |
+
messageDiv.appendChild(messageContent);
|
1286 |
+
messageDiv.appendChild(timestamp);
|
1287 |
+
|
1288 |
+
recommendBody.insertBefore(messageDiv, recommendTypingIndicator);
|
1289 |
+
recommendBody.scrollTop = recommendBody.scrollHeight;
|
1290 |
+
}
|
1291 |
+
|
1292 |
+
function handleRecommendInput() {
|
1293 |
+
const message = recommendInput.value.trim().toLowerCase();
|
1294 |
+
if (!message) return;
|
1295 |
+
|
1296 |
+
addRecommendMessage(message, true);
|
1297 |
+
recommendInput.value = "";
|
1298 |
+
recommendTypingIndicator.style.display = "block";
|
1299 |
+
|
1300 |
+
if (message === 'hi') {
|
1301 |
+
if (navigator.geolocation) {
|
1302 |
+
navigator.geolocation.getCurrentPosition(function(position) {
|
1303 |
+
const latitude = position.coords.latitude;
|
1304 |
+
const longitude = position.coords.longitude;
|
1305 |
+
console.log("User Location:", latitude, longitude);
|
1306 |
+
|
1307 |
+
// First set the location
|
1308 |
+
const locationData = {
|
1309 |
+
latitude: latitude,
|
1310 |
+
longitude: longitude,
|
1311 |
+
session_id: 'recommend-session',
|
1312 |
+
user_plan: plan
|
1313 |
+
};
|
1314 |
+
|
1315 |
+
fetch("/set-location", {
|
1316 |
+
method: "POST",
|
1317 |
+
headers: {
|
1318 |
+
"Content-Type": "application/json",
|
1319 |
+
},
|
1320 |
+
body: JSON.stringify(locationData)
|
1321 |
+
})
|
1322 |
+
.then(response => response.json())
|
1323 |
+
.then(data => {
|
1324 |
+
console.log("Location set:", data);
|
1325 |
+
if (data.error) {
|
1326 |
+
addRecommendMessage("Error setting location: " + data.error);
|
1327 |
+
} else {
|
1328 |
+
const city = data.city || 'Unknown city';
|
1329 |
+
const state = data.state || 'Unknown state';
|
1330 |
+
const country = data.country || 'Unknown country';
|
1331 |
+
addRecommendMessage(`Location set successfully in ${city}, ${state}, ${country}. Would you like to see nearby properties? Please reply with 'yes' or 'no'.`);
|
1332 |
+
}
|
1333 |
+
recommendTypingIndicator.style.display = "none";
|
1334 |
+
})
|
1335 |
+
.catch(error => {
|
1336 |
+
console.error("Error setting location:", error);
|
1337 |
+
addRecommendMessage("Unable to access your location. Please try again.");
|
1338 |
+
recommendTypingIndicator.style.display = "none";
|
1339 |
+
});
|
1340 |
+
}, function(error) {
|
1341 |
+
console.error("Error getting user location:", error);
|
1342 |
+
addRecommendMessage("Unable to access your location. Please try again.");
|
1343 |
+
recommendTypingIndicator.style.display = "none";
|
1344 |
+
});
|
1345 |
+
} else {
|
1346 |
+
console.error("Geolocation is not supported by this browser.");
|
1347 |
+
addRecommendMessage("Geolocation is not supported by your browser.");
|
1348 |
+
recommendTypingIndicator.style.display = "none";
|
1349 |
+
}
|
1350 |
+
} else {
|
1351 |
+
// Handle other messages with plan
|
1352 |
+
const fetchOptions = {
|
1353 |
+
method: "POST",
|
1354 |
+
headers: {
|
1355 |
+
"Content-Type": "application/json",
|
1356 |
+
},
|
1357 |
+
body: JSON.stringify({
|
1358 |
+
query: message,
|
1359 |
+
session_id: 'recommend-session',
|
1360 |
+
user_plan: plan
|
1361 |
+
})
|
1362 |
+
};
|
1363 |
+
|
1364 |
+
fetch("/recommend", fetchOptions)
|
1365 |
+
.then(response => response.json())
|
1366 |
+
.then(data => {
|
1367 |
+
if (message === 'yes' && data.properties) {
|
1368 |
+
let propertiesMessage = "Here are the 5 nearest properties to your location:\n";
|
1369 |
+
data.properties.forEach(property => {
|
1370 |
+
propertiesMessage += `**${property.PropertyName}** at ${property.Address}, ${property.City}\n`;
|
1371 |
+
propertiesMessage += ` Type: ${property.PropertyType}\n`;
|
1372 |
+
propertiesMessage += ` (Distance: ${property.Distance} miles)\n\n`;
|
1373 |
+
});
|
1374 |
+
addRecommendMessage(propertiesMessage);
|
1375 |
+
} else {
|
1376 |
+
addRecommendMessage(data.response);
|
1377 |
+
}
|
1378 |
+
recommendTypingIndicator.style.display = "none";
|
1379 |
+
})
|
1380 |
+
.catch(error => {
|
1381 |
+
console.error("Error:", error);
|
1382 |
+
addRecommendMessage("I apologize, but I encountered an error. Please try again.");
|
1383 |
+
recommendTypingIndicator.style.display = "none";
|
1384 |
+
});
|
1385 |
+
}
|
1386 |
+
}
|
1387 |
+
|
1388 |
+
recommendSendButton.addEventListener("click", handleRecommendInput);
|
1389 |
+
recommendInput.addEventListener("keypress", function(event) {
|
1390 |
+
if (event.key === "Enter") {
|
1391 |
+
handleRecommendInput();
|
1392 |
+
}
|
1393 |
+
});
|
1394 |
+
|
1395 |
+
const recommendMicrophoneButton = document.getElementById("recommendMicrophoneButton");
|
1396 |
+
const recommendRecognizer = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
|
1397 |
+
recommendRecognizer.lang = 'en-US';
|
1398 |
+
recommendRecognizer.continuous = true;
|
1399 |
+
recommendRecognizer.interimResults = true;
|
1400 |
+
|
1401 |
+
let recommendSilenceTimer = null;
|
1402 |
+
|
1403 |
+
recommendMicrophoneButton.addEventListener("click", () => {
|
1404 |
+
recommendMicrophoneButton.classList.add('listening');
|
1405 |
+
$('#listeningMessage').show();
|
1406 |
+
recommendRecognizer.start();
|
1407 |
+
console.log("Recommend listening...");
|
1408 |
+
});
|
1409 |
+
|
1410 |
+
recommendRecognizer.onresult = function(event) {
|
1411 |
+
clearTimeout(recommendSilenceTimer);
|
1412 |
+
|
1413 |
+
let finalTranscript = '';
|
1414 |
+
for (let i = event.resultIndex; i < event.results.length; i++) {
|
1415 |
+
if (event.results[i].isFinal) {
|
1416 |
+
finalTranscript += event.results[i][0].transcript;
|
1417 |
+
}
|
1418 |
+
}
|
1419 |
+
|
1420 |
+
if (finalTranscript) {
|
1421 |
+
recommendInput.value = finalTranscript;
|
1422 |
+
console.log("Recommend Transcript: ", finalTranscript);
|
1423 |
+
}
|
1424 |
+
|
1425 |
+
recommendSilenceTimer = setTimeout(() => {
|
1426 |
+
recommendRecognizer.stop();
|
1427 |
+
console.log("Stopped recommend listening due to silence");
|
1428 |
+
}, SILENCE_DURATION);
|
1429 |
+
};
|
1430 |
+
|
1431 |
+
recommendRecognizer.onend = function() {
|
1432 |
+
console.log("Recommend speech recognition service disconnected");
|
1433 |
+
$('#listeningMessage').hide();
|
1434 |
+
recommendMicrophoneButton.classList.remove('listening');
|
1435 |
+
clearTimeout(recommendSilenceTimer);
|
1436 |
+
|
1437 |
+
if (recommendInput.value.trim()) {
|
1438 |
+
handleRecommendInput();
|
1439 |
+
}
|
1440 |
+
};
|
1441 |
+
|
1442 |
+
recommendRecognizer.onerror = function(event) {
|
1443 |
+
console.error("Recommend speech recognition error", event.error);
|
1444 |
+
$('#listeningMessage').hide();
|
1445 |
+
recommendMicrophoneButton.classList.remove('listening');
|
1446 |
+
clearTimeout(recommendSilenceTimer);
|
1447 |
+
};
|
1448 |
+
|
1449 |
+
function formatMessageContent(content) {
|
1450 |
+
content = content.replace(/\n/g, '<br>');
|
1451 |
+
content = content.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>');
|
1452 |
+
return content;
|
1453 |
+
}
|
1454 |
+
|
1455 |
+
document.querySelectorAll('.quick-keyword').forEach(button => {
|
1456 |
+
button.addEventListener('click', function() {
|
1457 |
+
const message = this.getAttribute('data-message');
|
1458 |
+
if (this.closest('#chat-container')) {
|
1459 |
+
userInput.value = message;
|
1460 |
+
handleUserInput();
|
1461 |
+
} else if (this.closest('#recommend-container')) {
|
1462 |
+
recommendInput.value = message;
|
1463 |
+
handleRecommendInput();
|
1464 |
+
}
|
1465 |
+
});
|
1466 |
+
});
|
1467 |
+
|
1468 |
+
let plan = 'basic'; // Default plan
|
1469 |
+
|
1470 |
+
// Generate a unique session ID
|
1471 |
+
function generateSessionId() {
|
1472 |
+
return 'session_' + Math.random().toString(36).substr(2, 9) + '_' + Date.now();
|
1473 |
+
}
|
1474 |
+
|
1475 |
+
// Initialize session IDs
|
1476 |
+
const chatSessionId = generateSessionId();
|
1477 |
+
const recommendSessionId = generateSessionId();
|
1478 |
+
|
1479 |
+
// Initialize with stored plan or default to 'basic'
|
1480 |
+
const storedPlan = localStorage.getItem('selectedPlan') || 'basic';
|
1481 |
+
$('#planSelector').val(storedPlan);
|
1482 |
+
plan = storedPlan;
|
1483 |
+
updatePlanInRequests(storedPlan);
|
1484 |
+
|
1485 |
+
// Add plan selector change handler
|
1486 |
+
$('#planSelector').on('change', function() {
|
1487 |
+
const selectedPlan = $(this).val();
|
1488 |
+
console.log('Plan changed to:', selectedPlan);
|
1489 |
+
|
1490 |
+
// Store the selected plan in localStorage
|
1491 |
+
localStorage.setItem('selectedPlan', selectedPlan);
|
1492 |
+
|
1493 |
+
// Update the global plan variable
|
1494 |
+
plan = selectedPlan;
|
1495 |
+
|
1496 |
+
// Update all subsequent requests to include the new plan
|
1497 |
+
updatePlanInRequests(selectedPlan);
|
1498 |
+
});
|
1499 |
+
|
1500 |
+
// Function to update plan in all requests
|
1501 |
+
function updatePlanInRequests(plan) {
|
1502 |
+
// Update search request
|
1503 |
+
$('#queryForm').off('submit').on('submit', function(event) {
|
1504 |
+
event.preventDefault();
|
1505 |
+
const query = $('#userQuery').val();
|
1506 |
+
$('#results').empty();
|
1507 |
+
$('#errorMessage').empty();
|
1508 |
+
$('#loadingMessage').show();
|
1509 |
+
|
1510 |
+
$.ajax({
|
1511 |
+
url: '/search',
|
1512 |
+
type: 'POST',
|
1513 |
+
contentType: 'application/json',
|
1514 |
+
data: JSON.stringify({
|
1515 |
+
query: query,
|
1516 |
+
user_plan: plan
|
1517 |
+
}),
|
1518 |
+
success: function(data) {
|
1519 |
+
$('#loadingMessage').hide();
|
1520 |
+
if (data.error) {
|
1521 |
+
$('#errorMessage').text('Error: ' + data.error);
|
1522 |
+
} else {
|
1523 |
+
displayProperties(data, plan);
|
1524 |
+
}
|
1525 |
+
},
|
1526 |
+
error: function() {
|
1527 |
+
$('#loadingMessage').hide();
|
1528 |
+
$('#errorMessage').text('Error: Unable to fetch data. Please try again later.');
|
1529 |
+
}
|
1530 |
+
});
|
1531 |
+
});
|
1532 |
+
|
1533 |
+
// Update chat requests
|
1534 |
+
handleUserInput = function() {
|
1535 |
+
const message = userInput.value.trim().toLowerCase();
|
1536 |
+
if (!message) return;
|
1537 |
+
|
1538 |
+
addMessage(message, true);
|
1539 |
+
userInput.value = "";
|
1540 |
+
typingIndicator.style.display = "block";
|
1541 |
+
|
1542 |
+
const fetchOptions = {
|
1543 |
+
method: "POST",
|
1544 |
+
headers: {
|
1545 |
+
"Content-Type": "application/json",
|
1546 |
+
},
|
1547 |
+
body: JSON.stringify({
|
1548 |
+
query: message,
|
1549 |
+
session_id: chatSessionId,
|
1550 |
+
user_plan: plan
|
1551 |
+
})
|
1552 |
+
};
|
1553 |
+
|
1554 |
+
const endpoint = message === 'yes' ? '/recommend' : '/generate';
|
1555 |
+
|
1556 |
+
fetch(endpoint, fetchOptions)
|
1557 |
+
.then(response => response.json())
|
1558 |
+
.then(data => {
|
1559 |
+
if (message === 'yes' && data.properties) {
|
1560 |
+
let propertiesMessage = "Here are the 5 nearest properties to your location:\n";
|
1561 |
+
data.properties.forEach(property => {
|
1562 |
+
propertiesMessage += `**${property.PropertyName}** at ${property.Address}, ${property.City}\n`;
|
1563 |
+
propertiesMessage += ` Type: ${property.PropertyType}\n`;
|
1564 |
+
propertiesMessage += ` (Distance: ${property.Distance} miles)\n\n`;
|
1565 |
+
});
|
1566 |
+
addMessage(propertiesMessage);
|
1567 |
+
} else {
|
1568 |
+
addMessage(data.response);
|
1569 |
+
}
|
1570 |
+
typingIndicator.style.display = "none";
|
1571 |
+
})
|
1572 |
+
.catch(error => {
|
1573 |
+
console.error("Error:", error);
|
1574 |
+
addMessage("I apologize, but I encountered an error. Please try again.");
|
1575 |
+
typingIndicator.style.display = "none";
|
1576 |
+
});
|
1577 |
+
};
|
1578 |
+
        // Update recommend requests
        handleRecommendInput = function() {
            const message = recommendInput.value.trim().toLowerCase();
            if (!message) return;

            addRecommendMessage(message, true);
            recommendInput.value = "";
            recommendTypingIndicator.style.display = "block";

            const fetchOptions = {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                },
                body: JSON.stringify({
                    query: message,
                    session_id: recommendSessionId,
                    user_plan: plan
                })
            };

            fetch("/recommend", fetchOptions)
                .then(response => response.json())
                .then(data => {
                    if (message === 'yes' && data.properties) {
                        let propertiesMessage = "Here are the 5 nearest properties to your location:\n";
                        data.properties.forEach(property => {
                            propertiesMessage += `**${property.PropertyName}** at ${property.Address}, ${property.City}\n`;
                            propertiesMessage += `   Type: ${property.PropertyType}\n`;
                            propertiesMessage += `   (Distance: ${property.Distance} miles)\n\n`;
                        });
                        addRecommendMessage(propertiesMessage);
                    } else {
                        addRecommendMessage(data.response);
                    }
                    recommendTypingIndicator.style.display = "none";
                })
                .catch(error => {
                    console.error("Error:", error);
                    addRecommendMessage("I apologize, but I encountered an error. Please try again.");
                    recommendTypingIndicator.style.display = "none";
                });
        };
    }
});

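// Renders each property returned by the search endpoint as a card with an image
// carousel, collapsible detail sections and a favourite toggle. The `plan`
// argument is accepted for parity with the caller but is not used below.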
function displayProperties(properties, plan) {
    properties.forEach(function(property) {
        // Create image carousel HTML based on propertyImages array
        const carouselHTML = property.propertyImages && property.propertyImages.length > 0 ? `
            <div class="carousel">
                <div class="carousel-images">
                    ${property.propertyImages.map(imgUrl => `
                        <img src="${imgUrl}" alt="${property.PropertyName}" class="carousel-image">
                    `).join('')}
                </div>
                ${property.propertyImages.length > 1 ? `
                    <div class="carousel-nav">
                        ${property.propertyImages.map((_, index) => `
                            <div class="carousel-dot ${index === 0 ? 'active' : ''}" data-index="${index}"></div>
                        `).join('')}
                    </div>
                ` : ''}
            </div>
        ` : '<div class="no-image">No images available</div>';

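        // propertyImages is assumed to be an array of image URLs, e.g. (illustrative)
        // ["https://example.com/photo-1.jpg", "https://example.com/photo-2.jpg"];
        // when it is missing or empty, the card falls back to the "No images available" placeholder.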
        const propertyElement = $(`
            <div class="property">
                <div class="image-container">
                    ${carouselHTML}
                    <button class="favorite-button">
                        <i class="fas fa-heart"></i>
                    </button>
                </div>
                <div class="property-details">
                    <div class="property-header">
                        <h2>${property["PropertyName"]}</h2>
                        <span class="property-type">${property["PropertyType"]}</span>
                    </div>
                    <div style="display: flex; justify-content: space-between; flex-direction:column; gap:10px">
                        <span>
                            <strong>Location</strong>
                            ${property["Address"]}, ${property["City"]}, ${property["State"]}, ${property["Country"]}
                        </span>
                        <span>
                            <strong>Zip Code</strong>
                            ${property["ZipCode"]}
                        </span>
                    </div>
                    <div class="property-info">
                        <div class="scrollable-info" style="display: flex; justify-content: space-between">
                            <span><strong>Market Value</strong><br>${property["MarketValue"]}</span>
                            <span><strong>Total Square Feet</strong><br>${property["TotalSquareFeet"]}</span>
                            <span><strong>Beds</strong><br>${property["Beds"]}</span>
                            <span><strong>Baths</strong><br>${property["Baths"]}</span>
                            <span><strong>Year Built</strong><br>${property["YearBuilt"]}</span>
                            <span><strong>Status</strong><br>${property["PropertyStatus"]}</span>
                        </div>
                    </div>
                    <div class="accordion-section">
                        <div class="accordion-header">
                            <strong>Description</strong>
                            <i class="fas fa-chevron-down accordion-arrow"></i>
                        </div>
                        <div class="accordion-content">
                            <p>${property["Description"]}</p>
                        </div>
                    </div>
                    <div class="accordion-section">
                        <div class="accordion-header">
                            <strong>Key Features</strong>
                            <i class="fas fa-chevron-down accordion-arrow"></i>
                        </div>
                        <div class="accordion-content">
                            <div class="key-features">
                                ${property["KeyFeatures"].split(', ').map(feature => `
                                    <span class="feature-pill">${feature}</span>
                                `).join('')}
                            </div>
                        </div>
                    </div>
                    <div class="accordion-section">
                        <div class="accordion-header">
                            <strong>Nearby Amenities</strong>
                            <i class="fas fa-chevron-down accordion-arrow"></i>
                        </div>
                        <div class="accordion-content">
                            <div class="amenities-pills">
                                ${property["NearbyAmenities"].split(', ').map(amenity => `
                                    <span class="amenity-pill">${amenity}</span>
                                `).join('')}
                            </div>
                        </div>
                    </div>
                    <div class="construction-status">
                        <p><strong>Property Status:</strong> ${property["PropertyStatus"]}</p>
                        <p><strong>Year Built:</strong> ${property["YearBuilt"]}</p>
                    </div>
                    <div class="accordion-section">
                        <div class="accordion-header">
                            <strong>Agent Details</strong>
                            <i class="fas fa-chevron-down accordion-arrow"></i>
                        </div>
                        <div class="accordion-content">
                            <div style="display: flex; justify-content: space-between">
                                <span><strong>Agent Name</strong><br>${property["AgentName"]}</span>
                                <span><strong>Agent Phone</strong><br>${property["AgentPhoneNumber"]}</span>
                                <span><strong>Agent Email</strong><br>${property["AgentEmail"]}</span>
                            </div>
                        </div>
                    </div>
                </div>
            </div>
        `);
        $('#results').append(propertyElement);

        // Add carousel functionality if multiple images
        if (property.propertyImages && property.propertyImages.length > 1) {
            const carousel = propertyElement.find('.carousel');
            const images = carousel.find('.carousel-images');
            const dots = carousel.find('.carousel-dot');
            let currentIndex = 0;

            // Update carousel display
            function updateCarousel(index) {
                images.css('transform', `translateX(-${index * 100}%)`);
                dots.removeClass('active').eq(index).addClass('active');
                currentIndex = index;
            }

            // Click handlers for dots
            dots.on('click', function() {
                const index = $(this).data('index');
                updateCarousel(index);
            });

            // Optional: Add swipe functionality
            // (touch coordinates are read from the native event via originalEvent,
            // which works across jQuery versions)
            let touchStartX = 0;
            let touchEndX = 0;

            carousel.on('touchstart', function(e) {
                touchStartX = e.originalEvent.touches[0].clientX;
            });

            carousel.on('touchend', function(e) {
                touchEndX = e.originalEvent.changedTouches[0].clientX;
                const diff = touchStartX - touchEndX;

                if (Math.abs(diff) > 50) { // Minimum swipe distance
                    if (diff > 0 && currentIndex < property.propertyImages.length - 1) {
                        // Swipe left
                        updateCarousel(currentIndex + 1);
                    } else if (diff < 0 && currentIndex > 0) {
                        // Swipe right
                        updateCarousel(currentIndex - 1);
                    }
                }
            });

            // Optional: Auto-advance carousel
            let autoAdvance = setInterval(() => {
                const nextIndex = (currentIndex + 1) % property.propertyImages.length;
                updateCarousel(nextIndex);
            }, 5000); // Change image every 5 seconds

            // Pause auto-advance on hover
            carousel.hover(
                () => clearInterval(autoAdvance),
                () => {
                    autoAdvance = setInterval(() => {
                        const nextIndex = (currentIndex + 1) % property.propertyImages.length;
                        updateCarousel(nextIndex);
                    }, 5000);
                }
            );
        }

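        // Note: each card owns its auto-advance timer; it is cleared only while the
        // pointer hovers over that carousel and is not cleared when cards are
        // removed from #results.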
        propertyElement.find('.accordion-header').each(function() {
            $(this).on('click', function() {
                const $header = $(this);
                const $content = $header.next('.accordion-content');
                const $arrow = $header.find('.accordion-arrow');

                $content.slideToggle(300);
                $arrow.toggleClass('active');

                const $otherHeaders = $header.closest('.property-details')
                    .find('.accordion-header').not($header);

                $otherHeaders.each(function() {
                    $(this).next('.accordion-content').slideUp(300);
                    $(this).find('.accordion-arrow').removeClass('active');
                });
            });
        });

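        // Opening one accordion section slides the others in the same card closed,
        // so at most one of Description, Key Features, Nearby Amenities and Agent
        // Details is expanded at a time.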
        propertyElement.find('.favorite-button').on('click', function() {
            $(this).toggleClass('active');
        });
    });
}
</script>

</body>
</html>