"""Luna — a Streamlit chat UI backed by Groq's chat-completions API."""

import abc
import functools
import os
import time
from typing import List

import streamlit as st
from dotenv import load_dotenv
from groq import Groq

# Load environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

# Maximum number of interactions retained in the conversation trail.
CONVO_TRAIL_CUTOFF = 5
# Prompt template; both [[...]] placeholders are substituted by build_prompt().
PERSONAL_AI_ASSISTANT_PROMPT_HEAD = (
    "You are a helpful assistant. [[previous_interactions]] [[latest_input]]"
)
ASSISTANT_TYPE = "GroqPAF"


class Interaction:
    """One conversational turn; role is 'human' or 'assistant'."""

    def __init__(self, role: str, content: str):
        self.role = role
        self.content = content


class PersonalAssistantFramework(abc.ABC):
    """Abstract interface every assistant backend must implement."""

    @staticmethod
    def timeit_decorator(func):
        """Decorator that times each call of *func*.

        Fix: the original wrapper did no timing despite the name. The
        elapsed wall-clock seconds are now recorded on ``wrapper.last_elapsed``
        so callers can inspect timing without any change to the decorated
        function's return value or signature.
        """
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = func(*args, **kwargs)
            wrapper.last_elapsed = time.perf_counter() - start
            return result

        wrapper.last_elapsed = 0.0
        return wrapper

    @abc.abstractmethod
    def setup(self):
        """Prepare the backend (clients, models, ...)."""

    @abc.abstractmethod
    def think(self, prompt: str) -> str:
        """Return the assistant's textual response to *prompt*."""


class GroqPAF(PersonalAssistantFramework):
    """Assistant backend that calls Groq's chat-completions endpoint."""

    def __init__(self):
        self.client = Groq(api_key=os.getenv("GROQ_API_KEY"))

    def setup(self):
        # No additional model loading required; kept to satisfy the interface.
        self.llm_model = None

    @PersonalAssistantFramework.timeit_decorator
    def think(self, thought: str) -> str:
        """Send *thought* to the Groq API and return the reply text.

        API/network failures are converted into a human-readable error string
        instead of raising, so the Streamlit UI never crashes on a bad call.
        """
        try:
            response = self.client.chat.completions.create(
                model="mixtral-8x7b-32768",  # You can change this to other Groq models
                messages=[
                    {"role": "system", "content": "You are a helpful assistant named Luna."},
                    {"role": "user", "content": thought},
                ],
                max_tokens=500,
            )
            return response.choices[0].message.content.strip()
        except Exception as e:  # boundary: report, don't crash the UI
            return f"An error occurred: {str(e)}"


def build_prompt(latest_input: str, previous_interactions: List[Interaction]) -> str:
    """Fill the prompt template with the history and the newest user input."""
    previous_interactions_str = "\n".join(
        f"""
{interaction.role}
{interaction.content}
"""
        for interaction in previous_interactions
    )
    prepared_prompt = PERSONAL_AI_ASSISTANT_PROMPT_HEAD.replace(
        "[[previous_interactions]]", previous_interactions_str
    ).replace("[[latest_input]]", latest_input)
    return prepared_prompt


# ---------------------------------------------------------------------------
# Streamlit UI
# ---------------------------------------------------------------------------
st.set_page_config(page_title="Luna AI Chatbot", page_icon="🤖", layout="wide")

# Page styling. NOTE(review): the original CSS payload was lost in transit
# ("premium CSS styling with the new blue color palette"); this is a minimal
# reconstruction — restore the original stylesheet if it is recovered.
st.markdown(
    """
    <style>
    .chat-user { background: #e8f0fe; padding: 0.5rem 1rem; border-radius: 8px; margin: 0.25rem 0; }
    .chat-assistant { background: #f1f3f4; padding: 0.5rem 1rem; border-radius: 8px; margin: 0.25rem 0; }
    </style>
    """,
    unsafe_allow_html=True,
)

# Header. The surrounding <div> markup was stripped from the source; the
# visible text is preserved verbatim.
st.markdown(
    "<h1 style='text-align:center'>✨Luna✨</h1>"
    "<p style='text-align:center'>Your Friendly Digital Assistant chatbot!😊</p>",
    unsafe_allow_html=True,
)

with st.container():
    # Persist the conversation trail across Streamlit reruns.
    if "previous_interactions" not in st.session_state:
        st.session_state.previous_interactions = []

    st.markdown(
        "<p>Meet Luna,your friendly Ai assistant ready to help you in any language! 🌍💬</p>",
        unsafe_allow_html=True,
    )

    # Render the conversation so far.
    for interaction in st.session_state.previous_interactions:
        if interaction.role == "human":
            st.markdown(
                f'<div class="chat-user">You: {interaction.content}</div>',
                unsafe_allow_html=True,
            )
        else:
            st.markdown(
                f'<div class="chat-assistant">Assistant: {interaction.content}</div>',
                unsafe_allow_html=True,
            )

    user_input = st.text_area(
        "Enter your message:", height=100, placeholder="Type your message here..."
    )

    if st.button("Send"):
        if user_input:
            assistant = GroqPAF()
            assistant.setup()

            # Build the prompt from history *before* appending the new turn,
            # matching the original ordering.
            prompt = build_prompt(user_input, st.session_state.previous_interactions)
            response = assistant.think(prompt)

            st.session_state.previous_interactions.append(
                Interaction(role="human", content=user_input)
            )
            st.session_state.previous_interactions.append(
                Interaction(role="assistant", content=response)
            )

            # Echo the newest exchange immediately (history re-renders on rerun).
            st.markdown(
                f'<div class="chat-user">You: {user_input}</div>',
                unsafe_allow_html=True,
            )
            st.markdown(
                f'<div class="chat-assistant">Assistant: {response}</div>',
                unsafe_allow_html=True,
            )

            # Trim the trail. NOTE(review): the cutoff (5) is odd, so a
            # human/assistant pair can be split — confirm this is intended.
            if len(st.session_state.previous_interactions) > CONVO_TRAIL_CUTOFF:
                st.session_state.previous_interactions = (
                    st.session_state.previous_interactions[-CONVO_TRAIL_CUTOFF:]
                )
        else:
            st.warning("Please enter a message.")

st.write("This chatbot uses Groq's Mixtral 8x7B model to respond to your messages.")