mmaleki92 committed on
Commit
9256be3
·
verified ·
1 Parent(s): 67377af

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -30
app.py CHANGED
@@ -1,39 +1,75 @@
1
  import streamlit as st
2
  from llama_cpp import Llama
 
 
 
3
 
4
- # Title for your app
5
- st.title("Llama-3-8B-Physics Master - Chatbot")
 
 
 
 
 
 
6
 
7
- # Load the model from Hugging Face using llama_cpp
8
- @st.cache_resource
9
- def load_model():
10
- # Load the model from the Hugging Face Hub
11
- model = Llama.from_pretrained(
12
- repo_id="gallen881/Llama-3-8B-Physics_Master-GGUF",
13
- filename= "unsloth.Q4_K_M.gguf" # or unsloth.F16.gguf for a larger file
14
- )
15
- return model
 
 
16
 
17
- # Load the model once and store it in cache
18
- model = load_model()
 
 
 
 
 
 
 
19
 
20
- # Text input for the user
21
- user_input = st.text_area("Enter your message here:")
 
 
 
 
22
 
23
- if st.button("Generate Response"):
24
- if user_input:
25
- # Create chat completion with the model
26
- response = model.create_chat_completion(
27
- messages=[
28
- {
29
- "role": "user",
30
- "content": user_input
31
- }
32
- ]
33
- )
34
 
35
- # Extract the content from the model's response
36
- st.write("Model Response:", response['choices'][0]['message']['content'])
 
 
37
 
38
- else:
39
- st.write("Please enter a message.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  from llama_cpp import Llama
3
+ import os
4
+ import json
5
+ import time
6
 
7
+ # Function to convert message history to prompt
8
+ def prompt_from_messages(messages):
9
+ prompt = ''
10
+ for message in messages:
11
+ prompt += f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n"
12
+ prompt += f"{message['content']}<|eot_id|>"
13
+ prompt = prompt[:-10]
14
+ return prompt
15
 
16
+ # Initialize the Llama model
17
+ llm = Llama.from_pretrained(
18
+ repo_id='gallen881/Llama-3-8B-Physics_Master-GGUF',
19
+ filename='unsloth.Q4_K_M.gguf',
20
+ n_ctx=2048,
21
+ verbose=False
22
+ )
23
+
24
+ # Set up Streamlit App Layout
25
+ st.title("Physics Master Chatbot")
26
+ st.markdown("Ask **Physics Master** any physics-related question.")
27
 
28
+ # Initialize chat history in session state
29
+ if 'messages' not in st.session_state:
30
+ st.session_state.messages = [
31
+ {
32
+ 'role': 'system',
33
+ 'content': 'You are a professional physics master. Answer physics questions directly without using any external resources.'
34
+ }
35
+ ]
36
+ st.session_state.chat_time = time.time()
37
 
38
+ # Display chat history
39
+ for message in st.session_state.messages:
40
+ if message['role'] == 'user':
41
+ st.write(f"**You:** {message['content']}")
42
+ else:
43
+ st.write(f"**Physics Master:** {message['content']}")
44
 
45
+ # Input for user to ask questions
46
+ user_input = st.text_input("Ask a question", key="user_input")
 
 
 
 
 
 
 
 
 
47
 
48
+ if user_input:
49
+ # Append user message
50
+ user_message = {'role': 'user', 'content': user_input}
51
+ st.session_state.messages.append(user_message)
52
 
53
+ # Prepare to get the response from Physics Master
54
+ st.write('Physics Master is thinking...')
55
+ response = llm.create_chat_completion(
56
+ messages=st.session_state.messages,
57
+ stream=True
58
+ )
59
+
60
+ # Handle the response from the model and append it to the session state
61
+ for chunk in response:
62
+ delta = chunk['choices'][0]['delta']
63
+ if 'role' in delta:
64
+ st.session_state.messages.append({'role': delta['role'], 'content': ''})
65
+ elif 'content' in delta:
66
+ token = delta['content']
67
+ st.session_state.messages[-1]['content'] += token
68
+ st.write(token, end="", flush=True)
69
+
70
+ # Reset user input box
71
+ st.session_state.user_input = ""
72
+
73
+ # Save the chat history to a JSON file
74
+ with open('chat_history.json', 'w', encoding='utf8') as file:
75
+ json.dump(st.session_state.messages, file, indent=4)