Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -3,16 +3,19 @@ from sentence_transformers import SentenceTransformer, util
|
|
3 |
import openai
|
4 |
import os
|
5 |
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
|
|
6 |
# Initialize paths and model identifiers for easy configuration and maintenance
|
7 |
filename = "output_topic_details.txt" # Path to the file storing song recommendation details
|
8 |
retrieval_model_name = 'output/sentence-transformer-finetuned/'
|
9 |
openai.api_key = os.environ["OPENAI_API_KEY"]
|
|
|
10 |
# Attempt to load the necessary models and provide feedback on success or failure
|
11 |
try:
|
12 |
retrieval_model = SentenceTransformer(retrieval_model_name)
|
13 |
print("Models loaded successfully.")
|
14 |
except Exception as e:
|
15 |
print(f"Failed to load models: {e}")
|
|
|
16 |
def load_and_preprocess_text(filename):
|
17 |
"""
|
18 |
Load and preprocess text from a file, removing empty lines and stripping whitespace.
|
@@ -25,11 +28,12 @@ def load_and_preprocess_text(filename):
|
|
25 |
except Exception as e:
|
26 |
print(f"Failed to load or preprocess text: {e}")
|
27 |
return []
|
|
|
28 |
segments = load_and_preprocess_text(filename)
|
29 |
-
|
|
|
30 |
"""
|
31 |
-
Find the most relevant text
|
32 |
-
This version finds the best match based on the content of the query.
|
33 |
"""
|
34 |
try:
|
35 |
# Lowercase the query for better matching
|
@@ -39,20 +43,21 @@ def find_relevant_segment(user_query, segments):
|
|
39 |
segment_embeddings = retrieval_model.encode(segments)
|
40 |
# Compute cosine similarities between the query and the segments
|
41 |
similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
|
42 |
-
# Find the
|
43 |
-
|
44 |
-
# Return the most relevant
|
45 |
-
return segments[
|
46 |
except Exception as e:
|
47 |
-
print(f"Error in finding relevant
|
48 |
-
return
|
49 |
-
|
|
|
50 |
"""
|
51 |
Generate a response providing song recommendations based on mood.
|
52 |
"""
|
53 |
try:
|
54 |
system_message = "You are a music recommendation chatbot designed to suggest songs based on mood, catering to Gen Z's taste in music."
|
55 |
-
user_message = f"User query: {user_query}. Recommended songs: {
|
56 |
messages = [
|
57 |
{"role": "system", "content": system_message},
|
58 |
{"role": "user", "content": user_message}
|
@@ -70,17 +75,19 @@ def generate_response(user_query, relevant_segment):
|
|
70 |
except Exception as e:
|
71 |
print(f"Error in generating response: {e}")
|
72 |
return f"Error in generating response: {e}"
|
|
|
73 |
def query_model(question):
|
74 |
"""
|
75 |
Process a question, find relevant information, and generate a response.
|
76 |
"""
|
77 |
if question == "":
|
78 |
return "Welcome to the Song Recommendation Bot! Ask me for song recommendations based on your mood."
|
79 |
-
|
80 |
-
if not
|
81 |
return "Could not find specific song recommendations. Please refine your question."
|
82 |
-
response = generate_response(question,
|
83 |
return response
|
|
|
84 |
# Define the welcome message and specific topics the chatbot can provide information about
|
85 |
welcome_message = """
|
86 |
# 🎶: Welcome to the Song Recommendation Bot!
|
@@ -98,8 +105,9 @@ topics = """
|
|
98 |
- Nostalgic
|
99 |
- Self care
|
100 |
"""
|
|
|
101 |
# Setup the Gradio Blocks interface with custom layout components
|
102 |
-
with gr.Blocks() as demo:
|
103 |
gr.Markdown(welcome_message) # Display the formatted welcome message
|
104 |
with gr.Row():
|
105 |
with gr.Column():
|
@@ -111,4 +119,4 @@ with gr.Blocks() as demo:
|
|
111 |
submit_button = gr.Button("Submit")
|
112 |
submit_button.click(fn=query_model, inputs=question, outputs=answer)
|
113 |
# Launch the Gradio app to allow user interaction
|
114 |
-
demo.launch(share=True)
|
|
|
3 |
import os

import openai

# Keep HF tokenizers from forking worker threads (silences the parallelism warning).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Configuration lives up front so paths and model identifiers are easy to change.
filename = "output_topic_details.txt"  # Path to the file storing song recommendation details
retrieval_model_name = 'output/sentence-transformer-finetuned/'

# The OpenAI key comes from the environment; a missing OPENAI_API_KEY raises KeyError at startup.
openai.api_key = os.environ["OPENAI_API_KEY"]
|
12 |
# Eagerly load the retrieval model so a bad checkpoint path is reported at startup.
def _load_retrieval_model(path):
    # Construct the SentenceTransformer, letting any error propagate to the caller.
    return SentenceTransformer(path)

try:
    retrieval_model = _load_retrieval_model(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
|
18 |
+
|
19 |
def load_and_preprocess_text(filename):
|
20 |
"""
|
21 |
Load and preprocess text from a file, removing empty lines and stripping whitespace.
|
|
|
28 |
except Exception as e:
|
29 |
print(f"Failed to load or preprocess text: {e}")
|
30 |
return []
|
31 |
+
|
32 |
# Materialize the candidate recommendation segments once, at import time.
segments = load_and_preprocess_text(filename)
|
33 |
+
|
34 |
+
def find_relevant_segments(user_query, segments, top_k=5):
    """
    Find the top-k most relevant text segments for a user's query.

    Ranks candidate segments by cosine similarity between the query
    embedding and each segment embedding, computed with the module-level
    ``retrieval_model``.

    Args:
        user_query (str): Free-text query from the user.
        segments (list[str]): Candidate text segments to rank.
        top_k (int): Maximum number of segments to return (default 5).

    Returns:
        list[str]: Up to ``top_k`` segments, most similar first; an empty
        list if ``segments`` is empty or an error occurs.
    """
    try:
        if not segments:
            # Nothing to rank; calling topk on an empty tensor would raise.
            return []
        # Lowercase the query for better matching
        lowercase_query = user_query.lower()
        # NOTE(review): the embedding lines were not visible in the diff view;
        # reconstructed from the standard sentence-transformers pattern — confirm
        # against the original file.
        query_embedding = retrieval_model.encode(lowercase_query)
        segment_embeddings = retrieval_model.encode(segments)
        # Compute cosine similarities between the query and the segments
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        # Clamp k so topk never asks for more items than exist (topk raises otherwise).
        k = min(top_k, len(segments))
        # Find the indices of the top-k most similar segments
        top_k_indices = similarities.topk(k).indices
        # Return the most relevant segments, best match first
        return [segments[idx] for idx in top_k_indices]
    except Exception as e:
        print(f"Error in finding relevant segments: {e}")
        return []
|
53 |
+
|
54 |
+
def generate_response(user_query, relevant_segments):
|
55 |
"""
|
56 |
Generate a response providing song recommendations based on mood.
|
57 |
"""
|
58 |
try:
|
59 |
system_message = "You are a music recommendation chatbot designed to suggest songs based on mood, catering to Gen Z's taste in music."
|
60 |
+
user_message = f"User query: {user_query}. Recommended songs: {', '.join(relevant_segments)}"
|
61 |
messages = [
|
62 |
{"role": "system", "content": system_message},
|
63 |
{"role": "user", "content": user_message}
|
|
|
75 |
except Exception as e:
|
76 |
print(f"Error in generating response: {e}")
|
77 |
return f"Error in generating response: {e}"
|
78 |
+
|
79 |
def query_model(question):
    """
    Process a question, find relevant information, and generate a response.

    Args:
        question (str): The user's free-text question; may be empty or None
            (Gradio can pass either for a blank textbox).

    Returns:
        str: The welcome prompt for blank input, a fallback message when no
        relevant segments are found, or the generated recommendation text.
    """
    # Treat None and whitespace-only input the same as an empty query so stray
    # spaces from the UI textbox do not trigger a pointless retrieval call.
    if not question or not question.strip():
        return "Welcome to the Song Recommendation Bot! Ask me for song recommendations based on your mood."
    relevant_segments = find_relevant_segments(question, segments)
    if not relevant_segments:
        return "Could not find specific song recommendations. Please refine your question."
    response = generate_response(question, relevant_segments)
    return response
|
90 |
+
|
91 |
# Define the welcome message and specific topics the chatbot can provide information about
|
92 |
welcome_message = """
|
93 |
# 🎶: Welcome to the Song Recommendation Bot!
|
|
|
105 |
- Nostalgic
|
106 |
- Self care
|
107 |
"""
|
108 |
+
|
109 |
# Setup the Gradio Blocks interface with custom layout components
|
110 |
+
with gr.Blocks(css="custom.css") as demo:
|
111 |
gr.Markdown(welcome_message) # Display the formatted welcome message
|
112 |
with gr.Row():
|
113 |
with gr.Column():
|
|
|
119 |
submit_button = gr.Button("Submit")
|
120 |
submit_button.click(fn=query_model, inputs=question, outputs=answer)
|
121 |
# Launch the Gradio app to allow user interaction
|
122 |
+
demo.launch(share=True)
|