JAMESPARK3 committed on
Commit
dcfbbb8
·
verified ·
1 Parent(s): 8f76517

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -5,10 +5,7 @@ from huggingface_hub import InferenceClient
5
  HF_TOKEN = st.secrets["HF_TOKEN"]
6
 
7
  # Inference Client 설정 (GRIN-MoE 모델 사용)
8
- client = InferenceClient(
9
- repo_id="microsoft/GRIN-MoE",
10
- token=HF_TOKEN
11
- )
12
 
13
  # Streamlit 페이지 설정
14
  st.set_page_config(page_title="GRIN-MoE AI Chat", page_icon="🤖")
@@ -25,6 +22,7 @@ user_input = st.text_input("질문을 입력하세요:")
25
  def generate_streaming_response(prompt):
26
  response_text = ""
27
  for message in client.chat_completion(
 
28
  messages=[{"role": "user", "content": prompt}],
29
  max_tokens=500,
30
  stream=True
@@ -54,3 +52,4 @@ if st.session_state.messages:
54
  else:
55
  st.write(f"**AI:** {msg['content']}")
56
 
 
 
5
  HF_TOKEN = st.secrets["HF_TOKEN"]
6
 
7
  # Inference Client 설정 (GRIN-MoE 모델 사용)
8
+ client = InferenceClient(token=HF_TOKEN)
 
 
 
9
 
10
  # Streamlit 페이지 설정
11
  st.set_page_config(page_title="GRIN-MoE AI Chat", page_icon="🤖")
 
22
  def generate_streaming_response(prompt):
23
  response_text = ""
24
  for message in client.chat_completion(
25
+ model="microsoft/GRIN-MoE", # 모델 이름을 명시적으로 전달
26
  messages=[{"role": "user", "content": prompt}],
27
  max_tokens=500,
28
  stream=True
 
52
  else:
53
  st.write(f"**AI:** {msg['content']}")
54
 
55
+