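The Space is a single-file Streamlit app. The complete `app.py` is below: the sidebar exposes the model choice and sampling parameters, the conversation lives in `st.session_state`, and replies are streamed from the Hugging Face Inference API via `InferenceClient`.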
```python
# app.py
import streamlit as st
from huggingface_hub import InferenceClient
from datetime import datetime

# Configure page
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="expanded"
)

# Initialize session state
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar controls
with st.sidebar:
    st.title("🤖 Chatbot Settings")
    st.markdown("Created by [ruslanmv.com](https://ruslanmv.com/)")

    # Model selection
    selected_model = st.selectbox(
        "Choose Model",
        options=[
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "deepseek-ai/DeepSeek-R1",
            "deepseek-ai/DeepSeek-R1-Zero"
        ],
        index=0
    )

    # System message
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    # Generation parameters
    max_new_tokens = st.slider(
        "Max new tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=50
    )
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=1.0,
        step=0.1
    )
    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )

    # Optional HF Token
    hf_token = st.text_input(
        "HuggingFace Token (optional)",
        type="password",
        help="Enter your HuggingFace token if required for model access"
    )

# Main chat interface
st.title("💬 DeepSeek Chatbot")
st.caption("🚀 A conversational AI powered by DeepSeek models")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
        if "timestamp" in message:
            st.caption(f"_{message['timestamp']}_")

# Chat input and processing
if prompt := st.chat_input("Type your message..."):
    # Add user message to history
    st.session_state.messages.append({
        "role": "user",
        "content": prompt,
        "timestamp": datetime.now().strftime("%H:%M:%S")
    })

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)
        st.caption(f"_{st.session_state.messages[-1]['timestamp']}_")

    # Create full prompt with system message
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
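    # NOTE: only the system message and the latest user turn are sent to the
    # model; see the multi-turn sketch after this listing for folding history in.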
    # Create client and generate response (only pass a token if one was
    # entered; st.text_input returns an empty string when left blank)
    client = InferenceClient(model=selected_model, token=hf_token or None)
    # Display assistant response
    with st.chat_message("assistant"):
        response = st.write_stream(
            client.text_generation(
                full_prompt,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                stream=True
            )
        )
        timestamp = datetime.now().strftime("%H:%M:%S")
        st.caption(f"_{timestamp}_")

    # Add assistant response to history
    st.session_state.messages.append({
        "role": "assistant",
        "content": response,
        "timestamp": timestamp
    })

# Optional debug information
# st.sidebar.markdown("---")
# st.sidebar.json(st.session_state.messages)
```
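Two notes on the generation call. First, `full_prompt` only ever contains the system message and the latest user turn, so the model sees no earlier context even though the full history sits in `st.session_state.messages`. Here is a minimal sketch of folding that history in; the `build_prompt` helper and the plain `User:`/`Assistant:` transcript format are illustrative assumptions, not part of the original app:

```python
# Hypothetical helper: fold the stored history into one prompt string.
# The User:/Assistant: transcript format is an assumption; adapt it to
# whatever template the selected model actually expects.
def build_prompt(system_message: str, messages: list) -> str:
    lines = [system_message, ""]
    for m in messages:
        speaker = "User" if m["role"] == "user" else "Assistant"
        lines.append(f"{speaker}: {m['content']}")
    lines.append("Assistant:")  # cue the model to answer
    return "\n".join(lines)

# Drop-in replacement for the single-turn prompt above:
# full_prompt = build_prompt(system_message, st.session_state.messages)
```

Second, depending on how a given model is currently served on the Inference API, raw `text_generation` may not be available; `InferenceClient.chat_completion` is the chat-style alternative. A sketch of adapting its streamed chunks for `st.write_stream`, assuming the selected model is served as a chat model:

```python
def stream_chat(client, system_message, history,
                max_new_tokens, temperature, top_p):
    # Rebuild the conversation in OpenAI-style message format
    chat = [{"role": "system", "content": system_message}]
    chat += [{"role": m["role"], "content": m["content"]} for m in history]
    for chunk in client.chat_completion(
        messages=chat,
        max_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        delta = chunk.choices[0].delta.content  # incremental text, may be None
        if delta:
            yield delta

# Usage inside the assistant block:
# response = st.write_stream(stream_chat(client, system_message,
#                                        st.session_state.messages,
#                                        max_new_tokens, temperature, top_p))
```

To try the app locally, run `streamlit run app.py`. On a Streamlit Space, make sure `huggingface_hub` (and anything else the app imports beyond what the SDK provides) is listed in the Space's `requirements.txt`.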