dazzleun-7 committed on
Commit
29cd263
·
verified ·
1 Parent(s): 34e8d76

Create app.py

Files changed (1)
  1. app.py +383 -0
app.py ADDED
@@ -0,0 +1,383 @@
+ # gradio final ver ----------------------------
+
+ import os
+ import numpy as np
+ import pandas as pd
+ import requests
+ from PIL import Image
+ import torch
+ from transformers import AutoProcessor, AutoModelForZeroShotImageClassification, pipeline
+ import gradio as gr
+ import openai
+ from sklearn.metrics.pairwise import cosine_similarity
+ import ast
+ from kobert_tokenizer import KoBERTTokenizer  # used below; assumes SKTBrain's kobert_tokenizer package is installed
+
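+ # Note: the openai.ChatCompletion / openai.Image calls below follow the pre-1.0
+ # openai SDK interface, so this script assumes openai<1.0 is installed.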
+
+
+ ###### Basic setup ######
+ # OpenAI API key (read from the environment rather than hardcoding a secret in the source)
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+
+ # Load the models and processor
+ processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
+ model_clip = AutoModelForZeroShotImageClassification.from_pretrained("openai/clip-vit-large-patch14")
+ tokenizer = KoBERTTokenizer.from_pretrained('skt/kobert-base-v1')
+
+ # Prediction labels
+ labels = ['a photo of a happy face', 'a photo of a joyful face', 'a photo of a loving face',
+           'a photo of an angry face', 'a photo of a melancholic face', 'a photo of a lonely face']
+
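+ # CLIP scores the uploaded photo against each of these six label prompts;
+ # the softmax over the six scores (below) is the face-emotion probability vector.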
+ ###### Face-emotion vector prediction ######
+ def predict_face_emotion(image):
+     # If the image is None or invalid, return an empty vector
+     if image is None:
+         return np.zeros(len(labels))
+
+     # Convert the PIL image to RGB
+     image = image.convert("RGB")
+
+     # Preprocess with the CLIP processor
+     inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
+
+     # pixel_values has shape (batch_size, channels, height, width)
+     pixel_values = inputs["pixel_values"]
+
+     # Run CLIP, passing the expected forward() inputs
+     with torch.no_grad():
+         outputs = model_clip(pixel_values=pixel_values, input_ids=inputs["input_ids"])
+
+     # Convert logits to probabilities
+     probs = outputs.logits_per_image.softmax(dim=1)[0]
+     return probs.numpy()
+
+ ###### Text-emotion vector prediction ######
+ def predict_text_emotion(predict_sentence):
+     if not isinstance(predict_sentence, str):
+         predict_sentence = str(predict_sentence)
+
+     data = [predict_sentence, '0']
+     dataset_another = [data]
+
+     # BERTDataset, vocab, max_len, device, and the fine-tuned KoBERT classifier
+     # `model` are expected to be defined/loaded elsewhere before this is called.
+     another_test = BERTDataset(dataset_another, 0, 1, tokenizer, vocab, max_len, True, False)
+     test_dataloader = torch.utils.data.DataLoader(another_test, batch_size=1, num_workers=5)
+
+     model.eval()
+
+     sentence_emotions = []  # local, so results from earlier calls do not accumulate
+     for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(test_dataloader):
+         token_ids = token_ids.long().to(device)
+         segment_ids = segment_ids.long().to(device)
+
+         out = model(token_ids, valid_length, segment_ids)
+         for i in out:
+             emotion_scores = [value.item() for value in i]
+             sentence_emotions.append(emotion_scores)
+     return sentence_emotions[0]  # return the vector for the single input sentence
+
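+ # Assumption: the KoBERT classifier emits six scores in the same emotion order
+ # as `labels` above (happy, joyful, loving, angry, melancholic, lonely), so the
+ # two vectors can be mixed element-wise below.
+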
+ ###### Final emotion vector ######
+ def generate_final_emotion_vector(diary_input, image_input):
+     # Predict the text-emotion vector
+     text_vector = predict_text_emotion(diary_input)
+     # Predict the face-emotion vector
+     image_vector = predict_face_emotion(image_input)
+     text_vector = np.array(text_vector, dtype=float)
+     image_vector = np.array(image_vector, dtype=float)
+
+     print(text_vector)
+     print(image_vector)
+
+     # Weight and combine the two vectors into the final emotion vector
+     return (text_vector * 0.7) + (image_vector * 0.3)
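+
+ # Note: predict_text_emotion returns raw KoBERT logits while predict_face_emotion
+ # returns softmax probabilities; if the two scales diverge too much, applying a
+ # softmax to the text logits before the 0.7/0.3 mix may give a better-behaved blend.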
+
+ ####### Cosine similarity ######
+ def cosine_similarity_fn(vec1, vec2):
+     dot_product = np.dot(vec1, vec2)
+     norm_vec1 = np.linalg.norm(vec1)
+     norm_vec2 = np.linalg.norm(vec2)
+     if norm_vec1 == 0 or norm_vec2 == 0:
+         return np.nan  # return NaN for zero vectors
+     return dot_product / (norm_vec1 * norm_vec2)
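+ # e.g., cosine_similarity_fn(np.array([1.0, 0.0]), np.array([1.0, 0.0])) -> 1.0,
+ # while any comparison against an all-zero vector -> NaN (filtered out downstream).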
+
+
+ ####### Image download helper (returns a PIL object) ######
+ def download_image(image_url):
+     try:
+         response = requests.get(image_url, stream=True)
+         response.raise_for_status()
+         return Image.open(response.raw)  # reuse this response instead of fetching the URL twice
+     except Exception as e:
+         print(f"Image download error: {e}")
+         return None
+
+ # Style options
+ options = {
+     1: "🌼 Friendly",
+     2: "🔥 Trendy Gen-Z",
+     3: "😄 Humorous prankster",
+     4: "🧘 Calm meditator",
+     5: "🎨 Creative artist",
+ }
+
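+ # The radio component below shows strings like "1. 🌼 Friendly"; the handlers
+ # split off the leading number to look up the style in this dict.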
+ # Diary analysis
+ def chatbot_diary_with_image(style_option, diary_input, image_input, playlist_input):
+
+     style = options.get(int(style_option.split('.')[0]), "🌼 Friendly")
+
+     # GPT response (diary comment)
+     try:
+         response_comment = openai.ChatCompletion.create(
+             model="gpt-4-turbo",
+             messages=[{"role": "system", "content": f"You are a {style} chatbot."}, {"role": "user", "content": diary_input}],
+         )
+         comment = response_comment.choices[0].message.content
+     except Exception as e:
+         comment = f"💬 Error: {e}"
+
+     # GPT-based diary topic recommendation
+     try:
+         topics = get_initial_response(style_option, diary_input)
+     except Exception as e:
+         topics = f"📝 Topic recommendation error: {e}"
+
+     # DALL·E 3 image generation request (3D-style character)
+     try:
+         response = openai.Image.create(
+             model="dall-e-3",
+             prompt=(
+                 f"Draw a 3D-style illustrated character expressing the emotion of: {diary_input}. "
+                 "The character should have a soft, rounded design with a face that clearly conveys the emotion. "
+                 "Include a prop or small symbol that visually expresses the emotion. "
+                 "Use clear, clean colors that reflect the mood, and give the character a dynamic, playful pose. "
+                 "Only one character should appear in the image. "
+                 "Keep the background simple and bright so the character stands out."
+             ),
+             size="1024x1024",
+             n=1
+         )
+         # Get the URL and download the image
+         image_url = response['data'][0]['url']
+         print(f"Generated Image URL: {image_url}")  # check the URL
+         image = download_image(image_url)
+     except Exception as e:
+         print(f"Image generation error: {e}")  # print error details
+         image = None
+
+     # Final user emotion vector
+     final_user_emotions = generate_final_emotion_vector(diary_input, image_input)
+
+     # Cosine similarity against each song's emotion vector
+     # (`emotions` and `melon_emotions` hold the per-song emotion vectors and
+     # song metadata, and are expected to be loaded elsewhere.)
+     similarities = [cosine_similarity_fn(final_user_emotions, song_vec) for song_vec in emotions]
+
+     # Keep only valid (non-NaN) similarities
+     valid_indices = [i for i, sim in enumerate(similarities) if not np.isnan(sim)]
+     filtered_similarities = [similarities[i] for i in valid_indices]
+
+     # Sort by similarity (descending) and map the sorted positions back to the
+     # original dataframe indices, so the iloc lookups below stay aligned
+     order = np.argsort(filtered_similarities)[::-1]
+     recommendations = [valid_indices[i] for i in order]
+     results_df = pd.DataFrame({
+         'Singer': melon_emotions['singer'].iloc[recommendations].values,
+         'title': melon_emotions['Title'].iloc[recommendations].values,
+         'genre': melon_emotions['genre'].iloc[recommendations].values,
+         'Cosine Similarity': [similarities[idx] for idx in recommendations]
+     })
+
+     # Weighting value
+     gamma = 0.3
+
+     similar_playlists = results_df.head(5)
+     similar_playlists = pd.merge(similar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
+     similar_playlists = similar_playlists[["title", "Emotions", "singer"]]
+
+     dissimilar_playlists = results_df.tail(5)
+     dissimilar_playlists = pd.merge(dissimilar_playlists, melon_emotions, left_on="title", right_on="Title", how="inner")
+     dissimilar_playlists = dissimilar_playlists[["title", "Emotions", "singer"]]
+
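+     # Both branches below score candidates with the same blend:
+     #   final_score = gamma * song_song_similarity + (1 - gamma) * user_term
+     # where user_term is the user-song similarity for the "Similar" playlist and
+     # (1 - user-song similarity) for the "Opposite" playlist.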
+     # Playlist similar to the emotion
+     if playlist_input == 'Similar':
+         results = []
+         seen_songs = set(similar_playlists["title"].values)  # seed seen_songs with the similar_playlists songs
+
+         # User emotion vector
+         user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
+
+         for index, row in similar_playlists.iterrows():
+             song_title = row["title"]
+             song_singer = row["singer"]
+             song_vector = np.array(row["Emotions"]).reshape(1, -1)
+
+             song_results = []
+             for i, emotion_vec in enumerate(emotions):
+                 emotion_title = melon_emotions.iloc[i]["Title"]
+                 emotion_singer = melon_emotions.iloc[i]["singer"]
+                 emotion_vec = np.array(emotion_vec).reshape(1, -1)
+
+                 # Skip the song itself and anything already in seen_songs
+                 if (
+                     emotion_title != song_title and
+                     emotion_title not in seen_songs
+                 ):
+                     try:
+                         # Song-song similarity
+                         song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
+
+                         # Similarity to the user emotion vector (User-Song Similarity)
+                         user_song_similarity = cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
+
+                         # Final score
+                         final_score = gamma * song_song_similarity + (1 - gamma) * user_song_similarity
+
+                         song_results.append({
+                             "Title": emotion_title,
+                             "Singer": emotion_singer,
+                             "Song-Song Similarity": song_song_similarity,
+                             "User-Song Similarity": user_song_similarity,
+                             "Final Score": final_score
+                         })
+                     except ValueError as e:
+                         print(f"Error with {song_title} vs {emotion_title}: {e}")
+                         continue
+
+             # Keep the top 3 songs by final score
+             song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
+             seen_songs.update([entry["Title"] for entry in song_results])
+
+             results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
+
+         # Print the results
+         for result in results:
+             print(f"{result['Singer']} - {result['Song Title']}")
+             for entry in result["Top 3 Similarities"]:
+                 print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
+                 print(f"  (Song-Song Similarity: {entry['Song-Song Similarity']:.4f}, User-Song Similarity: {entry['User-Song Similarity']:.4f})")
+             print("-" * 30)
+
+     # Opposite playlist
+     if playlist_input == 'Opposite':
+         results = []
+         seen_songs = set()
+
+         # User emotion vector
+         user_emotion_vector = generate_final_emotion_vector(diary_input, image_input).reshape(1, -1)
+
+         for index, row in dissimilar_playlists.iterrows():
+             song_title = row["title"]
+             song_singer = row["singer"]
+             song_vector = np.array(row["Emotions"]).reshape(1, -1)
+
+             song_results = []
+             for i, emotion_vec in enumerate(emotions):
+                 emotion_title = melon_emotions.iloc[i]["Title"]
+                 emotion_singer = melon_emotions.iloc[i]["singer"]
+                 emotion_vec = np.array(emotion_vec).reshape(1, -1)
+
+                 if (
+                     emotion_title != song_title and
+                     emotion_title not in dissimilar_playlists["title"].values and
+                     emotion_title not in seen_songs
+                 ):
+                     try:
+                         # Song-song similarity
+                         song_song_similarity = cosine_similarity(song_vector, emotion_vec)[0][0]
+
+                         # Dissimilarity to the user emotion vector (User-Song Dissimilarity)
+                         opposite_user_song_similarity = 1 - cosine_similarity(user_emotion_vector, emotion_vec)[0][0]
+
+                         # Final score
+                         final_score = gamma * song_song_similarity + (1 - gamma) * opposite_user_song_similarity
+
+                         song_results.append({
+                             "Title": emotion_title,
+                             "Singer": emotion_singer,
+                             "Song-Song Similarity": song_song_similarity,
+                             "User-Song Dissimilarity": opposite_user_song_similarity,
+                             "Final Score": final_score
+                         })
+                     except ValueError as e:
+                         print(f"Error with {song_title} vs {emotion_title}: {e}")
+                         continue
+
+             # Keep the top 3 songs by final score (higher = more opposite)
+             song_results = sorted(song_results, key=lambda x: x["Final Score"], reverse=True)[:3]
+             seen_songs.update(entry["Title"] for entry in song_results)
+
+             results.append({"Song Title": song_title, "Singer": song_singer, "Top 3 Similarities": song_results})
+
+         # Print the results
+         for result in results:
+             print(f"{result['Singer']} - {result['Song Title']}")
+             for entry in result["Top 3 Similarities"]:
+                 print(f"{entry['Singer']} - {entry['Title']} : Final Score {entry['Final Score']:.4f}")
+                 print(f'  (Song-Song Similarity: {entry["Song-Song Similarity"]:.4f}, User-Song Dissimilarity: {entry["User-Song Dissimilarity"]:.4f})')
+             print("-" * 30)
+
+     # Build rows for the dataframe conversion
+     df_rows = []
+
+     for result in results:
+         song_title = result['Song Title']
+         song_singer = result['Singer']
+         main_song_info = f"{song_singer} - {song_title}"
+
+         for entry in result["Top 3 Similarities"]:
+             combined_info = f"{entry['Singer']} - {entry['Title']}"
+             df_rows.append({"1st Recommended Playlist": main_song_info, "2nd Recommended Playlist": combined_info})
+
+     # Build the dataframe
+     final_music_playlist_recommendation = pd.DataFrame(df_rows)
+
+     # Group by song so only the first row of each group shows the song title
+     final_music_playlist_recommendation["1st Recommended Playlist"] = final_music_playlist_recommendation.groupby("1st Recommended Playlist")["1st Recommended Playlist"].transform(
+         lambda x: [x.iloc[0]] + [""] * (len(x) - 1)
+     )
+
+     return final_music_playlist_recommendation, comment, topics, image
+
+ # Diary topic recommendation
+ def get_initial_response(style, sentence):
+     style = options.get(int(style.split('.')[0]), "🌼 Friendly")
+     system_prompt_momentum = (
+         f"You are a {style} chatbot. Based on the diary the user wrote, recommend 4-5 concrete "
+         "diary prompts or questions that help them organize their thoughts and reflect inwardly."
+     )
+     try:
+         response = openai.ChatCompletion.create(
+             model="gpt-4-turbo",
+             messages=[
+                 {"role": "system", "content": system_prompt_momentum},
+                 {"role": "user", "content": sentence}
+             ],
+             temperature=1
+         )
+         return response.choices[0].message.content
+     except Exception as e:
+         return f"📝 Topic recommendation error: {e}"
+
+ # Gradio interface
+ with gr.Blocks() as app:
+     gr.Markdown("# ✨ Smart Emotion Diary Service ✨\n\nRecord your day and get a matching playlist plus diary-reflection content, generated automatically!")
+     with gr.Row():
+         with gr.Column():
+             chatbot_style = gr.Radio(
+                 choices=[f"{k}. {v}" for k, v in options.items()],
+                 label="🤖 Choose a chatbot style"
+             )
+             diary_input = gr.Textbox(label="📜 Record your day", placeholder="e.g., I went on a picnic today and ate lots of tasty food, so I was really excited")
+             image_input = gr.Image(type="pil", label="📷 Upload a photo of your facial expression")
+             playlist_input = gr.Radio(["Similar", "Opposite"], label="🎧 Get a playlist that is similar to or opposite of today's emotion")
+             submit_btn = gr.Button("🚀 Start analysis")
+
+         with gr.Column():
+             output_playlist = gr.Dataframe(label="🎧 Recommended playlist")
+             output_comment = gr.Textbox(label="💬 AI comment")
+             output_topics = gr.Textbox(label="📝 Suggested diary prompts")
+             output_image = gr.Image(label="🖼️ Generated emotion character for today", type="pil", width=512, height=512)
+
+     # Wire up the button click event
+     submit_btn.click(
+         fn=chatbot_diary_with_image,
+         inputs=[chatbot_style, diary_input, image_input, playlist_input],
+         outputs=[output_playlist, output_comment, output_topics, output_image]
+     )
+
+ # Launch the app
+ app.launch(debug=True)
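
Note: app.py references several names this file never defines — BERTDataset, model, vocab, max_len, device (the fine-tuned KoBERT classifier and its helpers, presumably from an accompanying training script or checkpoint), plus melon_emotions and emotions (the song metadata and per-song emotion vectors). A minimal sketch of the data-side setup, assuming a hypothetical melon_emotions.csv whose Emotions column stores Python list literals (which would also explain the otherwise-unused `import ast`):

    import ast
    import pandas as pd
    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    max_len = 64  # assumed KoBERT sequence length

    # Hypothetical file: song metadata with columns Title, singer, genre, Emotions
    melon_emotions = pd.read_csv("melon_emotions.csv")
    melon_emotions["Emotions"] = melon_emotions["Emotions"].apply(ast.literal_eval)
    emotions = melon_emotions["Emotions"].tolist()  # one 6-dim vector per song, row-aligned with melon_emotions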