Create app.py
app.py
ADDED
@@ -0,0 +1,383 @@
# gradio final ver ----------------------------

import os
import ast
import numpy as np
import pandas as pd
import requests
from PIL import Image
import torch
from transformers import AutoProcessor, AutoModelForZeroShotImageClassification
from kobert_tokenizer import KoBERTTokenizer  # provides the KoBERT tokenizer loaded below
import gradio as gr
import openai
from sklearn.metrics.pairwise import cosine_similarity

###### Basic settings ######
# OpenAI API key (loaded from the environment; never hardcode secrets)
openai.api_key = os.environ["OPENAI_API_KEY"]

# Load the models and processor
processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
model_clip = AutoModelForZeroShotImageClassification.from_pretrained("openai/clip-vit-large-patch14")
tokenizer = KoBERTTokenizer.from_pretrained('skt/kobert-base-v1')

# Prediction labels
labels = ['a photo of a happy face', 'a photo of a joyful face', 'a photo of a loving face',
          'a photo of an angry face', 'a photo of a melancholic face', 'a photo of a lonely face']

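# --- Assumed external artifacts (not defined in this file) ---
# The text-emotion and playlist code below references a fine-tuned KoBERT
# classifier (`model`), a `BERTDataset` class, `vocab`, `max_len`, and the
# Melon song data (`melon_emotions`, `emotions`) that this file never defines.
# A minimal sketch of what is assumed to exist; names and paths are hypothetical:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# max_len = 64                      # sequence length used at fine-tuning time
# vocab = ...                       # KoBERT vocab paired with `tokenizer`
# model = ...                       # fine-tuned KoBERT emotion classifier on `device`
# BERTDataset = ...                 # dataset class from the KoBERT fine-tuning code
# melon_emotions = pd.read_csv("melon_emotions.csv")  # hypothetical path
# melon_emotions["Emotions"] = melon_emotions["Emotions"].apply(ast.literal_eval)
# emotions = melon_emotions["Emotions"].tolist()      # one 6-dim vector per song
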
###### Face emotion vector prediction ######
def predict_face_emotion(image):
    # If the image is missing or invalid
    if image is None:
        return np.zeros(len(labels))  # return an empty vector

    # Convert the PIL image to RGB
    image = image.convert("RGB")

    # Preprocess with the CLIP processor
    inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)

    # pixel_values has shape (batch_size, channels, height, width)
    pixel_values = inputs["pixel_values"]

    # CLIP forward pass with the expected inputs
    with torch.no_grad():
        outputs = model_clip(pixel_values=pixel_values, input_ids=inputs["input_ids"])

    # Softmax over the image-text logits gives one probability per label
    probs = outputs.logits_per_image.softmax(dim=1)[0]
    return probs.numpy()

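# Quick local sanity check for the CLIP head (hypothetical test file path):
#   img = Image.open("test_face.jpg")
#   print(dict(zip(labels, predict_face_emotion(img).round(3))))
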
###### Text emotion vector prediction ######
def predict_text_emotion(predict_sentence):

    if not isinstance(predict_sentence, str):
        predict_sentence = str(predict_sentence)

    data = [predict_sentence, '0']
    dataset_another = [data]

    another_test = BERTDataset(dataset_another, 0, 1, tokenizer, vocab, max_len, True, False)
    test_dataloader = torch.utils.data.DataLoader(another_test, batch_size=1, num_workers=5)

    model.eval()

    # Collect logits locally; a module-level list would keep growing across
    # calls and always return the first call's result
    sentence_emotions = []
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(test_dataloader):
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)

        out = model(token_ids, valid_length, segment_ids)
        for i in out:
            emotion_scores = [value.item() for value in i]
            sentence_emotions.append(emotion_scores)
    return sentence_emotions[0]  # return the final list

###### Final emotion vector ######
def generate_final_emotion_vector(diary_input, image_input):
    # Predict the text emotion vector
    text_vector = predict_text_emotion(diary_input)
    # Predict the face emotion vector
    image_vector = predict_face_emotion(image_input)
    text_vector = np.array(text_vector, dtype=float)
    image_vector = np.array(image_vector, dtype=float)

    print(text_vector)
    print(image_vector)

    # Weighted combination into the final emotion vector
    return (text_vector * 0.7) + (image_vector * 0.3)

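# Example of the 0.7/0.3 blend on illustrative 6-dim vectors:
#   text  = [0.9, 0.0, 0.0, 0.1, 0.0, 0.0]
#   image = [0.5, 0.3, 0.1, 0.1, 0.0, 0.0]
#   0.7*text + 0.3*image = [0.78, 0.09, 0.03, 0.10, 0.00, 0.00]
# Note that the text vector is raw KoBERT logits while the image vector is a
# CLIP softmax, so the two components are on different scales.
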
####### Cosine similarity ######
def cosine_similarity_fn(vec1, vec2):
    dot_product = np.dot(vec1, vec2)
    norm_vec1 = np.linalg.norm(vec1)
    norm_vec2 = np.linalg.norm(vec2)
    if norm_vec1 == 0 or norm_vec2 == 0:
        return np.nan  # return NaN for zero vectors
    return dot_product / (norm_vec1 * norm_vec2)

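# e.g. cosine_similarity_fn([1, 0], [1, 1]) == 1 / sqrt(2) ≈ 0.7071,
# and cosine_similarity_fn([0, 0], [1, 1]) returns NaN by design.
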
####### Image download helper (returns a PIL object) ######
def download_image(image_url):
    try:
        # Stream once instead of issuing a second request for the body
        response = requests.get(image_url, stream=True)
        response.raise_for_status()
        return Image.open(response.raw)
    except Exception as e:
        print(f"Image download error: {e}")
        return None

# Style options
options = {
    1: "🎼 Friendly",
    2: "🔥 Trendy MZ-generation",
    3: "😄 Humorous prankster",
    4: "🧘 Calm meditator",
    5: "🎨 Creative artist",
}

# Diary analysis function
def chatbot_diary_with_image(style_option, diary_input, image_input, playlist_input):

    style = options.get(int(style_option.split('.')[0]), "🎼 Friendly")

    # GPT response (diary comment)
    try:
        response_comment = openai.ChatCompletion.create(
            model="gpt-4-turbo",
            messages=[{"role": "system", "content": f"You are a {style} chatbot."}, {"role": "user", "content": diary_input}],
        )
        comment = response_comment.choices[0].message.content
    except Exception as e:
        comment = f"💬 Error: {e}"

    # GPT-based diary topic recommendations
    try:
        topics = get_initial_response(style_option, diary_input)
    except Exception as e:
        topics = f"📔 Topic recommendation error: {e}"

    # DALL·E 3 image generation request (3D-style character)
    try:
        response = openai.Image.create(
            model="dall-e-3",
            prompt=(
                f"Draw a 3D-style illustrated character that expresses the emotion in: {diary_input}. "
                "The character should have a soft, rounded design, with a facial expression that clearly shows the emotion. "
                "Include props or small symbols that visualize the emotion. "
                "Use bright, clean colors that reflect the mood, and give the character a dynamic, playful pose. "
                "Show only one character in the image. "
                "Keep the background simple and bright so the character stands out."
            ),
            size="1024x1024",
            n=1
        )
        # Fetch the URL and download the image
        image_url = response['data'][0]['url']
        print(f"Generated Image URL: {image_url}")  # check the URL
        image = download_image(image_url)
    except Exception as e:
        print(f"Image generation error: {e}")  # print error details
        image = None

    # User's final emotion vector
    final_user_emotions = generate_final_emotion_vector(diary_input, image_input)

    # Cosine similarity against every song
    similarities = [cosine_similarity_fn(final_user_emotions, song_vec) for song_vec in emotions]

    # Keep only valid (non-NaN) similarities
    valid_indices = [i for i, sim in enumerate(similarities) if not np.isnan(sim)]
    filtered_similarities = [similarities[i] for i in valid_indices]

    # Sort by similarity, descending, and map back to indices into the
    # original data (sorting the filtered list alone would misalign .iloc)
    recommendations = [valid_indices[i] for i in np.argsort(filtered_similarities)[::-1]]
    results_df = pd.DataFrame({
        'Singer': melon_emotions['singer'].iloc[recommendations].values,
        'title': melon_emotions['Title'].iloc[recommendations].values,
        'genre': melon_emotions['genre'].iloc[recommendations].values,
        'Cosine Similarity': [similarities[idx] for idx in recommendations]
    })

    # Weight between song-song and user-song similarity
    gamma = 0.3

    similar_playlists = results_df.head(5)
    similar_playlists = pd.merge(similar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
    similar_playlists = similar_playlists[["title", "Emotions", "singer"]]

    dissimilar_playlists = results_df.tail(5)
    dissimilar_playlists = pd.merge(dissimilar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
    dissimilar_playlists = dissimilar_playlists[["title", "Emotions", "singer"]]

    # Playlist similar to the user's emotions
    if playlist_input == 'Similar':
        results = []
        seen_songs = set(similar_playlists["title"].values)  # seed seen_songs with the similar-playlist songs

        # User emotion vector
        user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)

        for index, row in similar_playlists.iterrows():
            song_title = row["title"]
            song_singer = row["singer"]
            song_vector = np.array(row["Emotions"]).reshape(1, -1)

            song_results = []
            for i, emotion_vec in enumerate(emotions):
                emotion_title = melon_emotions.iloc[i]["Title"]
                emotion_singer = melon_emotions.iloc[i]["singer"]
                emotion_vec = np.array(emotion_vec).reshape(1, -1)

                # Exclude the seed song itself and anything already in seen_songs
                if (
                    emotion_title != song_title and
                    emotion_title not in seen_songs
                ):
                    try:
                        # Song-song similarity
                        song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]

                        # User-song similarity
                        user_song_similarity = cosine_similarity(user_emotion_vector, emotion_vec)[0][0]

                        # Final score
                        final_score = gamma * song_song_similarity + (1 - gamma) * user_song_similarity

                        song_results.append({
                            "Title": emotion_title,
                            "Singer": emotion_singer,
                            "Song-Song Similarity": song_song_similarity,
                            "User-Song Similarity": user_song_similarity,
                            "Final Score": final_score
                        })
                    except ValueError as e:
                        print(f"Error with {song_title} vs {emotion_title}: {e}")
                        continue

            # Keep the top 3 songs by Final Score
            song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
            seen_songs.update([entry["Title"] for entry in song_results])

            results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})

        # Print the results
        for result in results:
            print(f"{result['Singer']} - {result['Song Title']}")
            for entry in result["Top 3 Similarities"]:
                print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
                print(f"  (Song-Song Similarity: {entry['Song-Song Similarity']:.4f}, User-Song Similarity: {entry['User-Song Similarity']:.4f})")
            print("-" * 30)

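    # Worked example of the blended score with gamma = 0.3 (illustrative values):
    #   song-song similarity 0.90, user-song similarity 0.60
    #   final = 0.3 * 0.90 + 0.7 * 0.60 = 0.27 + 0.42 = 0.69
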
    # Opposite playlist
    if playlist_input == 'Opposite':
        results = []
        seen_songs = set()

        # User emotion vector
        user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)

        for index, row in dissimilar_playlists.iterrows():
            song_title = row["title"]
            song_singer = row["singer"]
            song_vector = np.array(row["Emotions"]).reshape(1, -1)

            song_results = []
            for i, emotion_vec in enumerate(emotions):
                emotion_title = melon_emotions.iloc[i]["Title"]
                emotion_singer = melon_emotions.iloc[i]["singer"]
                emotion_vec = np.array(emotion_vec).reshape(1, -1)

                if (
                    emotion_title != song_title and
                    emotion_title not in dissimilar_playlists["title"].values and
                    emotion_title not in seen_songs
                ):
                    try:
                        # Song-song similarity
                        song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]

                        # Dissimilarity to the user's emotion vector
                        opposite_user_song_similarity = 1 - cosine_similarity(user_emotion_vector, emotion_vec)[0][0]

                        # Final score
                        final_score = gamma * song_song_similarity + (1 - gamma) * opposite_user_song_similarity

                        song_results.append({
                            "Title": emotion_title,
                            "Singer": emotion_singer,
                            "Song-Song Similarity": song_song_similarity,
                            "User-Song Dissimilarity": opposite_user_song_similarity,
                            "Final Score": final_score
                        })
                    except ValueError as e:
                        print(f"Error with {song_title} vs {emotion_title}: {e}")
                        continue

            # Keep the top 3 songs by Final Score (higher = more opposite)
            song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
            seen_songs.update(entry["Title"] for entry in song_results)

            results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})

        # Print the results
        for result in results:
            print(f"{result['Singer']} - {result['Song Title']}")
            for entry in result["Top 3 Similarities"]:
                print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
                print(f"  (Song-Song Similarity: {entry['Song-Song Similarity']:.4f}, User-Song Dissimilarity: {entry['User-Song Dissimilarity']:.4f})")
            print("-" * 30)
    # Build rows for the output dataframe
    df_rows = []

    for result in results:
        song_title = result['Song Title']
        song_singer = result['Singer']
        main_song_info = f"{song_singer} - {song_title}"

        for entry in result["Top 3 Similarities"]:
            combined_info = f"{entry['Singer']} - {entry['Title']}"
            df_rows.append({"1st recommended playlist": main_song_info, "2nd recommended playlist": combined_info})

    # Build the dataframe
    final_music_playlist_recommendation = pd.DataFrame(df_rows)

    # Group by seed song and show its title only on the first row
    final_music_playlist_recommendation["1st recommended playlist"] = final_music_playlist_recommendation.groupby("1st recommended playlist")["1st recommended playlist"].transform(
        lambda x: [x.iloc[0]] + [""] * (len(x) - 1)
    )

    return final_music_playlist_recommendation, comment, topics, image

# Diary topic recommendation function
def get_initial_response(style, sentence):
    style = options.get(int(style.split('.')[0]), "🎼 Friendly")
    system_prompt_momentum = (
        f"You are a {style} chatbot. Based on the user's diary entry, recommend 4-5 concrete "
        "diary prompts or questions that help them organize their thoughts and reflect on themselves."
    )
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "system", "content": system_prompt_momentum},
                {"role": "user", "content": sentence}
            ],
            temperature=1
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"📔 Topic recommendation error: {e}"

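# NOTE (assumption): the OpenAI calls above use the legacy pre-1.0 Python API
# (`openai.ChatCompletion.create`, `openai.Image.create`), so this Space is
# presumed to pin the old SDK, e.g. `openai==0.28.1` in requirements.txt.
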
# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# ✨ Smart Emotion Diary Service ✨\n\nRecord your day, and we'll automatically generate a matching playlist and diary reflection content for you!")
    with gr.Row():
        with gr.Column():
            chatbot_style = gr.Radio(
                choices=[f"{k}. {v}" for k, v in options.items()],
                label="🤖 Choose a chatbot style"
            )
            diary_input = gr.Textbox(label="📝 Record your day", placeholder="e.g. I ate lots of tasty food today, so I was really excited")
            image_input = gr.Image(type="pil", label="📷 Upload a photo of your facial expression")
            playlist_input = gr.Radio(["Similar", "Opposite"], label="🎧 Get a playlist that matches or opposes today's emotions")
            submit_btn = gr.Button("🚀 Start analysis")

        with gr.Column():
            output_playlist = gr.Dataframe(label="🎧 Recommended playlist")
            output_comment = gr.Textbox(label="💬 AI comment")
            output_topics = gr.Textbox(label="📔 Recommended diary content")
            output_image = gr.Image(label="🖼️ Today's generated emotion character", type="pil", width=512, height=512)

    # Wire the button click event
    submit_btn.click(
        fn=chatbot_diary_with_image,
        inputs=[chatbot_style, diary_input, image_input, playlist_input],
        outputs=[output_playlist, output_comment, output_topics, output_image]
    )

# Run the app
app.launch(debug=True)
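# --- Requirements sketch (assumption: the Space's requirements.txt is not shown) ---
#   gradio
#   torch
#   transformers
#   openai==0.28.1        (legacy SDK; see the note above)
#   scikit-learn
#   pandas
#   numpy
#   pillow
#   requests
#   kobert_tokenizer      (from the SKTBrain/KoBERT repo, kobert_hf subdirectory)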