# chatbot/app.py
import streamlit as st
from llama_cpp import Llama
# Load the GGUF model
@st.cache_resource
def load_model():
    # Llama(model_path=...) expects a local .gguf file; the original passed a Hub
    # repo id, so download the weights via from_pretrained instead. The "*.gguf"
    # filename pattern is an assumption: adjust it to the uploaded file's name.
    return Llama.from_pretrained(
        repo_id="shanthi-323/lora_model_qnachatbot_cbt_q4_k_m",
        filename="*.gguf",
    )
llm = load_model()
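# Optional tuning (assumption, not from the original): from_pretrained forwards extra
# keyword arguments to the Llama constructor, so parameters such as n_ctx (context
# window size) and n_threads can be set there, e.g. n_ctx=2048, n_threads=4.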
st.title("Chat with GGUF Model")
st.write("Start interacting with your fine-tuned chatbot!")
# User input
user_input = st.text_input("You: ", placeholder="Type your message here...")

if user_input:
    # Generate a response using llama-cpp-python; max_tokens raises the default
    # completion length (16 tokens), which is too short for chat replies.
    response = llm(user_input, max_tokens=256)
    st.text_area("Bot:", value=response["choices"][0]["text"].strip(), height=200)
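# Note (assumption, not in the original app): llama-cpp-python also provides a
# chat-style API, llm.create_chat_completion(messages=[{"role": "user", "content":
# user_input}]), which applies the model's chat template; the call above sends the
# text as a plain completion prompt instead.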