|
|
import gradio as gr |
|
|
from huggingface_hub import InferenceClient |
|
|
|
|
|
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

# Shared Inference API client; every chat completion in this app is served by
# the hosted Zephyr-7B-beta model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
|
|
|
|
def create_system_prompt(agent_type, personality, expertise_level, language):
    """Build the system prompt that configures the movie-recommendation agent.

    Args:
        agent_type: Persona of the agent (e.g. expert, friend).
        personality: Tone the agent should keep throughout the conversation.
        expertise_level: How technical the explanations should be.
        language: Language the agent must respond in.

    Returns:
        The fully formatted system prompt string.
    """
    return f"""You are a {agent_type} movie recommendation agent with the following characteristics:
- Personality: {personality}
- Expertise Level: {expertise_level}
- Language: {language}

Your role is to:
1. Understand user preferences and mood
2. Provide personalized movie recommendations
3. Explain why you're recommending specific movies
4. Maintain a {personality} tone throughout the conversation
5. Consider the user's expertise level ({expertise_level}) when explaining

Please respond in {language}."""
|
|
|
|
|
def respond(
    message,
    history: list,
    agent_type,
    personality,
    expertise_level,
    language,
    max_tokens,
    temperature,
    top_p,
    genre,
    mood,
):
    """Stream a movie-recommendation reply for the chat UI.

    Builds an OpenAI-style message list (system prompt + prior turns + the new
    request enriched with genre/mood) and yields the accumulated response text
    as tokens stream back from the Inference API.

    Args:
        message: The user's latest free-text request.
        history: Prior conversation turns. Accepts both Gradio history
            formats: ``{"role", "content"}`` dicts (the ``type="messages"``
            Chatbot format used by this app) and legacy
            ``(user_text, assistant_text)`` tuples.
        agent_type, personality, expertise_level, language: Agent settings
            forwarded to ``create_system_prompt``.
        max_tokens, temperature, top_p: Sampling parameters for the model.
        genre, mood: Sidebar preference selections folded into the request.

    Yields:
        The accumulated response string after each streamed token.
    """
    system_message = create_system_prompt(agent_type, personality, expertise_level, language)
    messages = [{"role": "system", "content": system_message}]

    # Fold the sidebar selections into the request so the model sees them.
    enhanced_message = f"Genre: {genre}\nMood: {mood}\nUser request: {message}"

    for turn in history:
        # BUG FIX: the chatbot is created with type="messages", so history
        # entries are {"role", "content"} dicts; the original code indexed
        # them as tuples, which raises KeyError. Support both formats.
        if isinstance(turn, dict):
            if turn.get("content"):
                messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            user_text, assistant_text = turn
            if user_text:
                messages.append({"role": "user", "content": user_text})
            if assistant_text:
                messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": enhanced_message})

    response = ""
    # Loop variable renamed (was `message`, which shadowed the parameter).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # BUG FIX: delta.content can be None on some stream events; guard to
        # avoid TypeError from `str += None`.
        if token:
            response += token
        yield response
|
|
|
|
|
def reset_chat():
    """Return None so the bound Chatbot component is cleared."""
    return None
|
|
|
|
|
def show_settings_changed_info(agent_type, personality, expertise_level, language):
    """Return a Korean notice summarizing the newly selected agent settings.

    Triggered whenever one of the four agent-config dropdowns changes; the
    text also tells the user that the conversation has been reset.
    The Korean literal below is runtime output and is left untouched
    (its encoding appears garbled in this copy — NOTE(review): verify the
    file's encoding upstream).
    """
    return f"""
μλ‘μ΄ Agent μ€μ :
- μ ν: {agent_type}
- μ±κ²©: {personality}
- μ€λͺ
μμ€: {expertise_level}
- μλ΅ μΈμ΄: {language}

λνκ° μ΄κΈ°νλμμ΅λλ€. μλ‘μ΄ μ€μ μΌλ‘ λνλ₯Ό μμν΄μ£ΌμΈμ.
"""
|
|
|
|
|
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""

# Stylesheet injected via gr.Blocks(css=...): page layout, chat bubbles,
# input row, buttons, and the settings panels. The CSS comments inside the
# string are runtime data (Korean, encoding garbled in this copy) and are
# left untouched.
custom_css = """
.gradio-container {
    font-family: 'Helvetica Neue', Arial, sans-serif;
    max-width: 1200px;
    margin: auto;
    padding: 20px;
}

.container {
    max-width: 800px;
    margin: auto;
    padding: 20px;
}

/* μ±ν
컨ν
μ΄λ μ€νμΌ */
.chat-container {
    display: flex;
    flex-direction: column;
    height: 100%;
    background: #ffffff;
    border-radius: 10px;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
}

/* μ±ν
λ©μμ§ μ€νμΌ */
.chat-message {
    padding: 20px;
    margin: 10px;
    border-radius: 10px;
    max-width: 85%;
    line-height: 1.5;
    position: relative;
    animation: fadeIn 0.3s ease-in-out;
}

@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px); }
    to { opacity: 1; transform: translateY(0); }
}

/* μ¬μ©μ λ©μμ§ μ€νμΌ */
.user-message {
    background-color: #f0f2f5;
    margin-left: auto;
    border-bottom-right-radius: 0;
}

/* λ΄ λ©μμ§ μ€νμΌ */
.bot-message {
    background-color: #e3f2fd;
    margin-right: auto;
    border-bottom-left-radius: 0;
}

/* μ
λ ₯ μμ μ€νμΌ */
.input-container {
    display: flex;
    gap: 10px;
    padding: 20px;
    background: #ffffff;
    border-top: 1px solid #e0e0e0;
    position: sticky;
    bottom: 0;
}

/* μ
λ ₯ νλ μ€νμΌ */
input[type="text"] {
    flex: 1;
    padding: 12px;
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    font-size: 16px;
    transition: border-color 0.3s ease;
}

input[type="text"]:focus {
    border-color: #2196f3;
    outline: none;
}

/* λ²νΌ μ€νμΌ */
button {
    padding: 12px 24px;
    border: none;
    border-radius: 8px;
    background-color: #2196f3;
    color: white;
    font-weight: 600;
    cursor: pointer;
    transition: background-color 0.3s ease;
}

button:hover {
    background-color: #1976d2;
}

/* μ€μ ν¨λ μ€νμΌ */
.settings-panel {
    background: #ffffff;
    padding: 20px;
    border-radius: 10px;
    box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
}

/* λλ‘λ€μ΄ μ€νμΌ */
select {
    width: 100%;
    padding: 8px;
    border: 1px solid #e0e0e0;
    border-radius: 6px;
    margin-bottom: 10px;
}

/* μ¬λΌμ΄λ μ€νμΌ */
input[type="range"] {
    width: 100%;
    margin: 10px 0;
}
"""
|
|
|
|
|
# Assemble the Gradio Blocks UI: chat pane (left column) and three settings
# panels (right column), then wire up the event handlers.
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("""
# π¬ Personalized Movie Recommender

μλ
νμΈμ! λΉμ λ§μ μν μν μΆμ² μμ€ν
μ
λλ€.
μ νΈνλ μ₯λ₯΄μ νμ¬ κΈ°λΆμ μλ €μ£Όμλ©΄, λ§μΆ€ν μνλ₯Ό μΆμ²ν΄λ립λλ€.
""")

    with gr.Row():
        with gr.Column(scale=2):
            with gr.Group(elem_classes="chat-container"):
                chatbot = gr.Chatbot(
                    height=600,
                    show_copy_button=True,
                    # NOTE(review): avatar_images normally takes image file
                    # paths/URLs; emoji strings may not render — confirm.
                    avatar_images=("π€", "π¬"),
                    bubble_full_width=False,
                    elem_classes=["chat-message"],
                    # NOTE(review): type="messages" makes history entries
                    # {"role", "content"} dicts, but respond() indexes history
                    # items as (user, assistant) tuples — these disagree;
                    # confirm which format is intended.
                    type="messages"
                )
            with gr.Group(elem_classes="input-container"):
                msg = gr.Textbox(
                    placeholder="μ΄λ€ μνλ₯Ό μ°Ύκ³ κ³μ κ°μ?",
                    show_label=False,
                    container=False
                )
                # NOTE(review): msg.submit() is never wired, so pressing Enter
                # does not send a message — only the submit button does.
                with gr.Row():
                    submit = gr.Button("μΆμ² λ°κΈ°", variant="primary", size="sm")
                    clear = gr.Button("λν μ΄κΈ°ν", size="sm")

        with gr.Column(scale=1):
            # Recommendation preferences (genre / mood), both multi-select.
            with gr.Group(elem_classes="settings-panel"):
                gr.Markdown("### π― μΆμ² μ€μ ")
                genre = gr.Dropdown(
                    choices=["μ‘μ", "μ½λ―Έλ", "λλΌλ§", "λ‘맨μ€", "μ€λ¦΄λ¬", "SF", "ννμ§", "μ λλ©μ΄μ"],
                    label="μ νΈνλ μ₯λ₯΄",
                    multiselect=True
                )
                mood = gr.Dropdown(
                    choices=["μ λλ", "κ°μ±μ μΈ", "κΈ΄μ₯κ° μλ", "νΈμν", "μ λΉλ‘μ΄"],
                    label="νμ¬ κΈ°λΆ",
                    multiselect=True
                )

            # Agent persona settings; these feed create_system_prompt().
            with gr.Group(elem_classes="settings-panel"):
                gr.Markdown("### π€ Agent μ€μ ")
                agent_type = gr.Dropdown(
                    choices=["μ λ¬Έκ°", "μΉκ΅¬", "μν νλ‘ κ°", "νλ μ΄ν°"],
                    label="Agent μ ν",
                    value="μ λ¬Έκ°"
                )
                personality = gr.Dropdown(
                    choices=["μΉκ·Όν", "μ λ¬Έμ μΈ", "μ λ¨Έλ¬μ€ν", "κ°μ±μ μΈ", "κ°κ΄μ μΈ"],
                    label="μ±κ²©",
                    value="μΉκ·Όν"
                )
                expertise_level = gr.Dropdown(
                    choices=["μ΄λ³΄μ", "μ€κΈμ", "μ λ¬Έκ°"],
                    label="μ€λͺ
μμ€",
                    value="μ€κΈμ"
                )
                language = gr.Dropdown(
                    choices=["νκ΅μ΄", "μμ΄", "μΌλ³Έμ΄"],
                    label="μλ΅ μΈμ΄",
                    value="νκ΅μ΄"
                )

            # Sampling parameters passed straight to client.chat_completion().
            with gr.Group(elem_classes="settings-panel"):
                gr.Markdown("### βοΈ κ³ κΈ μ€μ ")
                max_tokens = gr.Slider(
                    minimum=1,
                    maximum=2048,
                    value=512,
                    step=1,
                    label="μ΅λ ν ν° μ"
                )
                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=4.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature"
                )
                top_p = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.95,
                    step=0.05,
                    label="Top-p"
                )

    # When any agent setting changes, show the settings summary, then reset
    # the conversation.
    for component in [agent_type, personality, expertise_level, language]:
        component.change(
            fn=show_settings_changed_info,
            inputs=[agent_type, personality, expertise_level, language],
            # NOTE(review): gr.Info() is a notification *function*, not an
            # output component — passing its return value as `outputs` is
            # almost certainly wrong. The usual pattern is to call
            # gr.Info(text) inside fn and use outputs=None. Confirm.
            outputs=gr.Info()
        ).then(
            fn=reset_chat,
            outputs=chatbot
        )

    # Send the message on button click, then clear the textbox.
    submit.click(
        respond,
        inputs=[
            msg,
            chatbot,
            agent_type,
            personality,
            expertise_level,
            language,
            max_tokens,
            temperature,
            top_p,
            genre,
            mood,
        ],
        # NOTE(review): respond yields a plain string, but a Chatbot output
        # expects the full message history — confirm this renders as intended
        # (gr.ChatInterface normally performs this adaptation; manual wiring
        # does not).
        outputs=chatbot,
    ).then(
        lambda: "",  # empty the input box after sending
        None,
        msg,
        queue=False
    )

    # Clear button empties the chat history.
    clear.click(lambda: None, None, chatbot, queue=False)
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server (blocking) when run as a script.
    demo.launch()
|
|
|