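# Streamlit chat UI for a GGUF chatbot served locally with llama-cpp-python.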
import streamlit as st
from llama_cpp import Llama

# Load the GGUF model.
# Llama(model_path=...) expects a local .gguf file, but the string below is a
# Hugging Face repo id, so the file is fetched with Llama.from_pretrained
# (requires the huggingface_hub package). The "*.gguf" glob is an assumption
# about the repo contents; replace it with the exact GGUF filename if needed.
@st.cache_resource
def load_model():
    return Llama.from_pretrained(
        repo_id="shanthi-323/lora_model_qnachatbot_cbt_q4_k_m",
        filename="*.gguf",  # assumed to match the single quantized (q4_k_m) file in the repo
        n_ctx=2048,         # context window; the library default (512) is tight for chat
    )

llm = load_model()

st.title("Chat with GGUF Model")
st.write("Start interacting with your fine-tuned chatbot!")

# User input
user_input = st.text_input("You: ", placeholder="Type your message here...")

if user_input:
    # Generate a completion with llama-cpp-python; max_tokens raises the library
    # default (16) so replies are not cut off after a few words
    response = llm(user_input, max_tokens=256)
    st.text_area("Bot:", value=response["choices"][0]["text"].strip(), height=200)
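
# To run (assuming this file is saved as app.py): streamlit run app.py
# Dependencies: streamlit, llama-cpp-python, and huggingface_hub
# (the last is needed by Llama.from_pretrained to download the GGUF file).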