# NOTE(review): the lines originally here were Hugging Face Spaces page-scrape
# residue (Space status, file size, commit hashes, a run of line numbers), not
# source code — removed so the file parses.
import os
import openai
import streamlit as st
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI as l_OpenAI
from transformers import pipeline
from helpers.foundation_models import *
# --- API configuration -------------------------------------------------------
# Both keys must be present in the environment. Fail fast with an actionable
# message instead of the opaque KeyError the bare os.environ[...] lookup raised.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
SERPAPI_API_KEY = os.environ.get("SERPAPI_API_KEY")
if not OPENAI_API_KEY or not SERPAPI_API_KEY:
    raise RuntimeError(
        "Missing required environment variable(s): set OPENAI_API_KEY and "
        "SERPAPI_API_KEY before launching the app."
    )

# Shared OpenAI client (used by the chat helpers imported from
# helpers.foundation_models — presumably; verify against that module).
openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
# Conversation transcript lives in session state so it survives Streamlit's
# top-to-bottom rerun on every interaction.
st.session_state.setdefault("messages", [])

# Replay the stored transcript so earlier turns stay visible after a rerun.
for past_turn in st.session_state.messages:
    with st.chat_message(past_turn["role"]):
        st.markdown(past_turn["content"])
# --- Sidebar controls --------------------------------------------------------
# BUG FIX: the original wrapped `st.sidebar.markdown` in a main-page
# `st.expander`, so an empty expander rendered on the main page while the text
# went straight to the sidebar, ignoring the expander entirely. Render the
# expander itself in the sidebar instead. (Garbled mojibake characters in the
# headings — "π", "π€" — were also removed.)
with st.sidebar.expander("Instructions"):
    st.markdown(
        r"""
# Streamlit + Hugging Face Demo

## Introduction

This demo showcases how to interact with Large Language Models (LLMs) on Hugging Face using Streamlit.
"""
    )

# Task selector: drives which branch of the chat handler runs below.
option = st.sidebar.selectbox(
    "Which task do you want to do?",
    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
)

# Wipe the stored transcript when the user asks for a fresh conversation.
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button:
    st.session_state.messages = []
@st.cache_resource(show_spinner=False)
def _load_pipeline(task, model=None):
    """Build and cache one transformers pipeline per (task, model) pair.

    The original code called ``pipeline(...)`` inline on every chat message,
    re-loading (and potentially re-downloading) the model each turn.
    ``st.cache_resource`` keeps a single instance alive for the app's lifetime.
    """
    if model is None:
        return pipeline(task)
    return pipeline(task, model=model)


# React to user input. The walrus assignment only enters the branch when the
# user submitted non-empty text, so the original's redundant inner
# ``if prompt:`` checks are dropped.
if prompt := st.chat_input("What is up?"):
    # Echo the user's message and record it in the transcript.
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.spinner("Wait for it..."):
        if option == "Sentiment Analysis":
            out = _load_pipeline("sentiment-analysis")(prompt)
            doc = f"""
            Prompt: {prompt}
            Sentiment: {out[0]["label"]}
            Score: {out[0]["score"]}
            """
        elif option == "Medical Summarization":
            summarizer = _load_pipeline(
                "summarization", model="Falconsai/medical_summarization"
            )
            doc = summarizer(prompt)[0]["summary_text"]
        elif option == "ChatGPT":
            doc = call_chatgpt(query=prompt)
        elif option == "ChatGPT (with Google)":
            # Ground the answer in a live web search before asking the LLM.
            ans_langchain = call_langchain(prompt)
            augmented_prompt = f"""
            Based on the internet search results: {ans_langchain};
            Answer the user question: {prompt}
            """
            doc = call_chatgpt(query=augmented_prompt)
        else:
            # Unknown task selection: reply with an empty message rather than crash.
            doc = ""

    response = doc  # all branches already produce a string; no f-string wrapper needed

    # Show the assistant's reply and persist it in the transcript.
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})