import os

import streamlit as st
from huggingface_hub import InferenceClient, login
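
# HF_TOKEN must be set in the environment (for example, as a Hugging Face Space secret).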
# Log in to the Hugging Face Hub (cached so the login only runs once across reruns).
@st.cache_resource
def hf_login():
    api_key = os.getenv("HF_TOKEN")
    try:
        login(token=api_key)
    except Exception as e:
        st.error(f"An error occurred while logging in: {e}")

hf_login()
# Model stuff
model_name = "deepseek-ai/DeepSeek-R1"
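# provider="together" routes requests to Together AI through Hugging Face Inference Providers.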
provider = "together"
base_prompt = '''
You are an AI that flirts with people. Flirt with whoever talks to you, following these rules:
- Do not use coarse language.
- Do not be sexist or racist.
- Do not sexually harass the people who talk to you.
'''
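
# st.cache_resource keeps a single InferenceClient alive across Streamlit reruns.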
@st.cache_resource
def load_client():
    try:
        client = InferenceClient(
            provider=provider,
            api_key=os.getenv("HF_TOKEN"),
        )
        return client
    except Exception as e:
        st.error(f"An error occurred while creating the client: {e}")
        return None

client = load_client()
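
# Send the running chat history to the model and return the assistant's reply text.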
def generate_response(messages: list):
    try:
        completion = client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=512,
        )
        response = completion.choices[0].message.content
        return response
    except Exception as e:
        st.error(f"An error occurred: {e}")
        return "I'm having a little trouble right now. Please try again later."
## Streamlit Start :)
st.title('Romantic AI Partner 💖')
st.markdown("""
""", unsafe_allow_html=True)
st.audio("little_one.mp3", format="audio/mpeg", loop=True)
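
# The conversation (including the system prompt) lives in st.session_state so it survives reruns.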
if "messages" not in st.session_state:
st.session_state.messages = [{"role": "system", "content": base_prompt}]
for message in st.session_state.messages[1:]:
with st.chat_message(message["role"]):
st.markdown(message["content"])
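
# st.chat_input returns None until the user submits a message, so this block only runs on new input.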
if prompt := st.chat_input("How's it going ;)"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)
    with st.chat_message("assistant"):
        with st.spinner('Thinking of you 😏'):
            response = generate_response(st.session_state.messages)
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
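
# Sidebar: clear the conversation or inspect the model configuration.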
with st.sidebar:
    st.header("Settings ⚙️")
    if st.button("**Clear History** 😰"):
        # Drop everything except the system prompt, then rerun the script from the top.
        st.session_state.messages = [{"role": "system", "content": base_prompt}]
        st.rerun()
    st.markdown("<br>", unsafe_allow_html=True)
    st.header("Model Settings 🤖")
    with st.expander("**Model**"):
        st.markdown(model_name)
    st.markdown("<br>", unsafe_allow_html=True)
    with st.expander('**Base Prompt**'):
        st.markdown(base_prompt)