ruslanmv committed on
Commit
81a633e
·
verified ·
1 Parent(s): 2c10389

Create app-work-only-1.py

Browse files
Files changed (1) hide show
  1. app-work-only-1.py +102 -0
app-work-only-1.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import requests

# Default Hugging Face Inference API endpoint. The sidebar lets the user
# switch models; the request URL is rebuilt from the selection at call time.
API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
# Function to query the Hugging Face Inference API
def query(payload, api_url):
    """POST *payload* to *api_url* and return the decoded JSON response.

    The bearer token is read from Streamlit secrets (key ``HF_TOKEN``).
    Raises ``requests.exceptions.RequestException`` (including ``Timeout``)
    on network failure; the caller wraps this call in a try/except.
    """
    headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
    # Fix: the original call had no timeout, so a stalled API request would
    # hang the Streamlit UI indefinitely. 60s is generous for generation.
    response = requests.post(api_url, headers=headers, json=payload, timeout=60)
    return response.json()
# Basic page chrome: title, icon, centered layout.
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered",
)

# Chat history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = []
# Sidebar: model selection and generation parameters.
with st.sidebar:
    st.header("Model Configuration")
    st.markdown("[Get HuggingFace Token](https://huggingface.co/settings/tokens)")

    # Available DeepSeek checkpoints on the Inference API.
    model_options = [
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "deepseek-ai/DeepSeek-R1",
        "deepseek-ai/DeepSeek-R1-Zero",
    ]
    selected_model = st.selectbox("Select Model", model_options, index=0)

    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100,
    )

    # Sampling controls, each as (label, min, max, default).
    max_tokens = st.slider("Max Tokens", 1, 4000, 512)
    temperature = st.slider("Temperature", 0.1, 4.0, 0.7)
    top_p = st.slider("Top-p", 0.1, 1.0, 0.9)
# Main chat interface header.
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by Hugging Face Inference API - Configure in sidebar")

# Replay the stored conversation on every rerun so history stays visible.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# Handle a new user message: record it, call the API, render the reply.
if prompt := st.chat_input("Type your message..."):
    # Persist and echo the user's message immediately.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    try:
        with st.spinner("Generating response..."):
            # Generation parameters come from the sidebar widgets.
            payload = {
                "inputs": prompt,
                "parameters": {
                    "max_new_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "return_full_text": False
                }
            }

            # Query the Hugging Face API using the selected model.
            output = query(payload, f"https://api-inference.huggingface.co/models/{selected_model}")

            # Successful generation: a list whose first item carries the text.
            if isinstance(output, list) and len(output) > 0 and 'generated_text' in output[0]:
                assistant_response = output[0]['generated_text']

                with st.chat_message("assistant"):
                    st.markdown(assistant_response)

                st.session_state.messages.append({"role": "assistant", "content": assistant_response})
            elif isinstance(output, dict) and "error" in output:
                # Fix: the Inference API reports failures (model loading,
                # rate limiting, bad token) as {"error": ...} dicts; surface
                # that message instead of a generic failure notice.
                st.error(f"API Error: {output['error']}")
            else:
                st.error("Error: Unable to generate a response. Please try again.")

    except Exception as e:
        # Top-level UI boundary: show any unexpected failure to the user.
        st.error(f"Application Error: {str(e)}")