blockenters committed
Commit 1ac6501 · 1 Parent(s): e05182e
Files changed (3):
  1. app.py +2 -2
  2. app2.py +56 -0
  3. requirements.txt +2 -1
app.py CHANGED
@@ -20,7 +20,7 @@ def load_model(model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"):
 def main():
     st.set_page_config(page_title="DeepSeek-R1 Chatbot", page_icon="🤖")
     st.title("DeepSeek-R1-based conversational chatbot")
-    st.write("A demo for testing Korean conversation with the DeepSeek-R1-Distill-Qwen-1.5B model.")
+    st.write("A demo for testing conversation with the DeepSeek-R1-Distill-Qwen-1.5B model.")
 
     # Initialize session state
     if "chat_history_ids" not in st.session_state:
@@ -44,7 +44,7 @@ def main():
         st.write(bot_text)
 
     # Chat input box
-    user_input = st.chat_input("Enter your message...")
+    user_input = st.chat_input("Enter your message in English...")
 
     if user_input:
         # Display user message
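For context, both hunks above follow Streamlit's st.chat_input / st.session_state chat pattern. The sketch below is a generic, minimal illustration of that pattern, not the repository's code; generate_reply is a hypothetical placeholder for the model call that app.py actually makes, and the "messages" session key is likewise illustrative.

import streamlit as st

def generate_reply(text: str) -> str:
    # Hypothetical placeholder for the model call in app.py.
    return f"(echo) {text}"

if "messages" not in st.session_state:
    st.session_state.messages = []             # conversation rendered so far

for msg in st.session_state.messages:          # replay history on every rerun
    with st.chat_message(msg["role"]):
        st.write(msg["content"])

user_input = st.chat_input("Enter your message in English...")
if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)

    bot_text = generate_reply(user_input)
    st.session_state.messages.append({"role": "assistant", "content": bot_text})
    with st.chat_message("assistant"):
        st.write(bot_text)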
app2.py ADDED
@@ -0,0 +1,56 @@
+import torch
+from transformers import pipeline
+import gradio as gr
+
+# Load the model
+def load_model(model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"):
+    pipe = pipeline(
+        "text-generation",
+        model=model_name,
+        device_map="auto",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+        truncation=True,
+        max_new_tokens=2048
+    )
+    return pipe
+
+# Generate the chatbot response
+def chat(message, history):
+    pipe = load_model()
+    prompt = f"Human: {message}\n\nAssistant:"
+
+    response = pipe(
+        prompt,
+        max_new_tokens=2048,
+        temperature=0.7,
+        do_sample=True,
+        truncation=True,
+        pad_token_id=50256
+    )
+
+    # Extract and post-process the response
+    try:
+        bot_text = response[0]["generated_text"]
+        bot_text = bot_text.split("Assistant:")[-1].strip()
+        if "</think>" in bot_text:
+            bot_text = bot_text.split("</think>")[-1].strip()
+    except:
+        bot_text = "Sorry, there was a problem generating the response."
+
+    return bot_text
+
+# Configure the Gradio interface
+demo = gr.ChatInterface(
+    chat,
+    chatbot=gr.Chatbot(height=600),
+    textbox=gr.Textbox(placeholder="Enter your message...", container=False, scale=7),
+    title="DeepSeek-R1 Chatbot",
+    description="A demo for testing conversation with the DeepSeek-R1-Distill-Qwen-1.5B model.",
+    examples=["Hello", "Who are you?", "What can you do?"],
+    theme=gr.themes.Soft()
+)
+
+# Run the server
+if __name__ == "__main__":
+    demo.launch(share=True, server_name="0.0.0.0")
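One caveat worth noting in the added file: chat() calls load_model() on every message, so the transformers pipeline is rebuilt for each turn. Below is a minimal sketch of one way to cache it, assuming the same model name and pipeline arguments as the commit; the lru_cache decorator is an addition, not something in the committed code.

from functools import lru_cache

import torch
from transformers import pipeline

@lru_cache(maxsize=1)
def load_model(model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"):
    # Built once per process and reused by every subsequent chat() call;
    # the pipeline arguments mirror the committed app2.py.
    return pipeline(
        "text-generation",
        model=model_name,
        device_map="auto",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        truncation=True,
        max_new_tokens=2048,
    )

Loading the pipeline once at module import would have the same effect; either way only the first request pays the model-loading cost.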
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 streamlit
 torch
 transformers>=4.35.0
-accelerate>=0.26.0
+accelerate
+gradio>=4.0.0