import streamlit as st
from llama_cpp import Llama
import json
import time
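# Assumed environment: pip install streamlit llama-cpp-python huggingface_hub
# (Llama.from_pretrained needs huggingface_hub to fetch the GGUF weights)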

# Helper: convert the message history into a Llama 3 chat-format prompt string
def prompt_from_messages(messages):
    prompt = ''
    for message in messages:
        prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n"
        prompt += f"{message['content']}<|eot_id|>"
    # Drop the trailing '<|eot_id|>' (10 characters) so generation can continue
    prompt = prompt[:-10]
    return prompt
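# Note: this helper appears unused below; create_chat_completion() applies the
# model's chat template internally. It is kept for manual prompting, e.g.
#   prompt_from_messages([{'role': 'user', 'content': 'What is inertia?'}])
#   -> '<|start_header_id|>user<|end_header_id|>\n\nWhat is inertia?'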

# Load the Llama model once and cache it across Streamlit reruns, so the
# GGUF weights are not re-loaded on every interaction
@st.cache_resource
def load_model():
    return Llama.from_pretrained(
        repo_id='gallen881/Llama-3-8B-Physics_Master-GGUF',
        filename='unsloth.Q4_K_M.gguf',
        n_ctx=2048,
        verbose=False
    )

llm = load_model()
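# Llama.from_pretrained fetches the GGUF file from the Hugging Face Hub on
# first use (via huggingface_hub) and reuses the local cache afterwards.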
# Set up Streamlit App Layout
st.title("Physics Master Chatbot")
st.markdown("Ask **Physics Master** any physics-related question.")

# Initialize chat history in session state (runs only on the first load)
if 'messages' not in st.session_state:
    st.session_state.messages = [
        {
            'role': 'system',
            'content': 'You are a professional physics master. Answer physics questions directly without using any external resources.'
        }
    ]
    st.session_state.chat_time = time.time()
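# st.session_state persists across Streamlit reruns within a browser session,
# so the conversation and chat_time survive each form submission.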

# Display chat history (skip the hidden system prompt)
for message in st.session_state.messages:
    if message['role'] == 'user':
        st.write(f"**You:** {message['content']}")
    elif message['role'] == 'assistant':
        st.write(f"**Physics Master:** {message['content']}")

# Use a form to manage user input and submission
with st.form(key="input_form", clear_on_submit=True):
    user_input = st.text_input("Ask a question", key="user_input")
    submit_button = st.form_submit_button(label="Send")

if submit_button and user_input:
    # Append the user message to the chat history
    user_message = {'role': 'user', 'content': user_input}
    st.session_state.messages.append(user_message)

    # Show a status line while the response is generated
    st.write('Physics Master is thinking...')

    # Accumulate the streamed tokens into a single string
    full_response = ""

    # Request a streamed chat completion from the model
    response = llm.create_chat_completion(
        messages=st.session_state.messages,
        stream=True
    )
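    # Assumed chunk shape (llama-cpp-python mirrors OpenAI's streaming API):
    #   first chunk:  choices[0]['delta'] == {'role': 'assistant'}
    #   later chunks: choices[0]['delta'] == {'content': '<next token>'}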
    for chunk in response:
        delta = chunk['choices'][0]['delta']
        if 'role' in delta:
            # First chunk: start an empty assistant message in the history
            st.session_state.messages.append({'role': delta['role'], 'content': ''})
        elif 'content' in delta:
            # Content chunks: accumulate each token into the full response
            full_response += delta['content']

    # Once streaming finishes, fill in the assistant message appended above
    st.session_state.messages[-1]['content'] = full_response

    # Display the full response
    st.write(f"**Physics Master:** {full_response}")

    # Save the chat history to a JSON file
    with open('chat_history.json', 'w', encoding='utf8') as file:
        json.dump(st.session_state.messages, file, indent=4)
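    # Note: on a Hugging Face Space this writes to ephemeral storage, so the
    # saved history is lost on restart unless persistent storage is enabled.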