import os

import streamlit as st

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceHub
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings

# Fail fast if the Hugging Face token is missing; assigning None into
# os.environ would raise a TypeError with a far less helpful message.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not hf_token:
    raise EnvironmentError("HUGGINGFACEHUB_API_TOKEN is not set.")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token

EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
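# all-MiniLM-L6-v2 produces 384-dimensional sentence embeddings; queries must
# use the same model the vector store was built with.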


def get_response(question):
    """Run the conversational chain and return just the answer text."""
    # Not cached: each response depends on the evolving conversation memory.
    result = st.session_state.conversational_chain({"question": question})
    response_text = result.get("answer", "Maaf, saya tidak mengetahui jawaban itu.")

    # The hub model may echo the whole prompt; keep only what follows "Answer:".
    if "Answer:" in response_text:
        response_text = response_text.split("Answer:")[1].strip()
    return response_text


@st.cache_resource
def setup_vectorstore():
    """Open the persisted Chroma store once and cache it across reruns."""
    persist_directory = "./vector_db_dir"
    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
    return Chroma(persist_directory=persist_directory, embedding_function=embeddings)
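# This app assumes ./vector_db_dir already contains an ingested collection.
# A minimal sketch of a separate ingestion script (the source file name is
# hypothetical), using the same embedding model:
#
#     from langchain_community.document_loaders import PyPDFLoader
#     from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#     docs = PyPDFLoader("data/kesehatan_wanita.pdf").load()  # hypothetical file
#     chunks = RecursiveCharacterTextSplitter(
#         chunk_size=1000, chunk_overlap=100).split_documents(docs)
#     Chroma.from_documents(
#         chunks,
#         HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME),
#         persist_directory="./vector_db_dir",
#     )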


def chat_chain(vectorstore):
    hf_hub_llm = HuggingFaceHub(
        repo_id="SeaLLMs/SeaLLMs-v3-7B-Chat",
        model_kwargs={"temperature": 1, "max_new_tokens": 1024},
    )
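    # Note: the HuggingFaceHub wrapper is deprecated in recent LangChain
    # releases; HuggingFaceEndpoint (langchain_huggingface) is its successor
    # if you upgrade.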

    # The "stuff" documents chain requires a {context} placeholder for the
    # retrieved documents; without it, building the chain raises a ValueError.
    prompt_template = """
You are an assistant specialized in women's health. Use the retrieved documents to answer the user's question.
If you don't know the answer or the information is not in the documents, reply with: "I'm sorry, I don't know."

Context:
{context}

Chat History:
{chat_history}

Question:
{question}

Answer:"""
    prompt = PromptTemplate(
        input_variables=["context", "chat_history", "question"],
        template=prompt_template,
    )

    retriever = vectorstore.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 2},
    )
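    # k=2 keeps the stuffed context small enough for the model's window;
    # raise it if answers seem to miss relevant passages.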

    # ConversationBufferMemory stores the raw exchange verbatim and takes no
    # llm argument (unlike ConversationSummaryMemory).
    memory = ConversationBufferMemory(
        output_key="answer",
        memory_key="chat_history",
        return_messages=True,
    )

    chain = ConversationalRetrievalChain.from_llm(
        llm=hf_hub_llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        verbose=True,
        combine_docs_chain_kwargs={"prompt": prompt},
    )
    return chain
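
# How ConversationalRetrievalChain works: it condenses the latest question and
# the chat history into a standalone query, retrieves the top-k chunks for that
# query, then "stuffs" them into the prompt above to produce the final answer.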


st.set_page_config(
    page_title="Asisten Kesehatan Wanita",
    page_icon="π",
    layout="centered",
)

st.title("π Asisten Kesehatan Wanita")

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

if "vectorstore" not in st.session_state:
    st.session_state.vectorstore = setup_vectorstore()

if "conversational_chain" not in st.session_state:
    st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)

# Replay the conversation so far; Streamlit reruns the script on every interaction.
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

user_input = st.chat_input("Tanyakan sesuatu...")

if user_input:
    st.session_state.chat_history.append({"role": "user", "content": user_input})

    with st.chat_message("user"):
        st.markdown(user_input)

    with st.chat_message("assistant"):
        # Use the helper so any echoed "Answer:" prefix is stripped from the output.
        assistant_response = get_response(user_input)
        st.markdown(assistant_response)
        st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})