# ICD DEMO — Streamlit chat app that extracts diagnoses from clinical
# progress notes and maps them to ICD codes using a LangChain RetrievalQA
# chain backed by a Pinecone vector index.
# Standard library
import json
import os
import re

# Third-party
import pinecone
import streamlit as st
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.output_parsers import OutputFixingParser
from langchain.schema import OutputParserException
from langchain.vectorstores import Pinecone
# Embedding model used both when the index was built and at query time.
model_name = 'text-embedding-ada-002'

load_dotenv()

st.title("ICD DEMO")

# API keys are collected interactively. NOTE(review): everything below runs
# at module level, so the clients are rebuilt on every Streamlit rerun and
# the pinecone/embedding calls will fail until both keys are entered.
gpt_api_key = st.text_input(
    label='Enter GPT KEY',
    placeholder="Enter GPT-API-KEY",
    label_visibility="collapsed",
)
pinecone_api_key = st.text_input(
    label='Enter PINECONE KEY',
    placeholder="Enter Pinecone API-KEY",
    label_visibility="collapsed",
)

embeddings = OpenAIEmbeddings(
    model=model_name,
    openai_api_key=gpt_api_key,
)

pinecone.init(
    api_key=pinecone_api_key,
    environment='gcp-starter',
)

# Pre-existing index named 'vectordb'; documents store their text under
# the 'text' metadata key.
index = pinecone.Index('vectordb')
vectorstore = Pinecone(
    index=index,
    embedding_function=embeddings.embed_query,
    text_key='text',
)
def get_response(instruction, query):
    """Answer *query* with a RetrievalQA chain over the Pinecone index.

    Args:
        instruction: System-style prompt text prepended to the query.
        query: The user's progress note.

    Returns:
        tuple[str, list]: the chain's text response and the top-5
        similarity-search documents (returned for inspection; the chain
        itself retrieves its own context via ``as_retriever()``).
    """
    # Fetched separately from the chain's retriever, purely so callers
    # can see which documents are closest to the query.
    results = vectorstore.similarity_search(query, k=5)

    llm = ChatOpenAI(
        openai_api_key=gpt_api_key,
        model_name='gpt-3.5-turbo',
        temperature=0.0,  # deterministic output for structured extraction
        request_timeout=1000,
    )
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=vectorstore.as_retriever(),
    )
    response = qa.run(str(instruction) + str(query))
    return response, results
# Prompt prepended to every progress note: extract diagnoses (with
# laterality/severity/type/cause/stage), exclude explicitly denied
# conditions, and emit JSON sorted by severity. The embedded example was
# previously malformed (missing closing quote on the second "ICD" value),
# which could teach the model to emit invalid JSON; fixed here.
instruction = """Given the progress notes below, your task is to carefully identify and list all the diagnosis, paying attention to the specific details such as laterality, severity, type, cause, and progression stage, that could influence to find the corresponding International Classification of Diseases (ICD) codes.
Please exclude any conditions that the patient explicitly denies (e.g., phrases like 'denies,' 'negative for,' etc).
Following the extraction process, compile the identified conditions in a list, prioritizing conditions of higher severity or urgency at the top, and present the data in a JSON format in descending order based on their priority or severity.
For example, below is the sample output:
{
"Diseases": [
    {
      "Disease": "Fatty Liver",
      "Laterality": "Not specified",
      "Severity": "Not specified",
      "Type": "Not specified",
      "Cause": "Alcoholic",
      "Progression Stage": "Not specified",
      "ICD" : "<ICD for Fatty Liver>"
    },
    {
      "Disease": "Leg Fracture",
      "Laterality": "Right",
      "Severity": "Not specified",
      "Type": "Not specified",
      "Cause": "Accident",
      "Progression Stage": "Not specified",
      "ICD" : "<ICD for Leg Fracture>"
    }
  ]
}
"""
def iterate_output(lis):
    """Serialize each item of *lis* to 3-space-indented JSON.

    Args:
        lis: iterable of JSON-serializable objects (typically dicts).

    Returns:
        str: the concatenation of each item's JSON dump, each followed
        by ",\\n". Empty string for an empty iterable.
    """
    # join() avoids the quadratic repeated-concatenation of the original;
    # the str() wrapper was redundant (json.dumps already returns str).
    return "".join(json.dumps(item, indent=3) + ",\n" for item in lis)
def stringFormatterByAlok(inputString):
    """Append a newline after every element of *inputString*.

    When given a str this inserts "\\n" after every character; when given
    a list of strings, after every string.

    Args:
        inputString: an iterable of strings (or a single str).

    Returns:
        str: the joined result; "" for an empty input.
    """
    # join() replaces the original quadratic += accumulation.
    return "".join(part + "\n" for part in inputString)
# ---- Chat UI -------------------------------------------------------------
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior turns so the conversation survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Enter the progress note here"):
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    response = get_response(instruction, prompt)
    with st.chat_message("assistant"):
        # Parse the model's JSON answer and render one entry per disease.
        # The original re-dumped the parsed list and passed it through
        # eval(), which executes arbitrary text derived from LLM output —
        # a code-injection risk and entirely unnecessary.
        # NOTE(review): json.loads will raise if the model returns
        # non-JSON text; consider a try/except with a fallback message.
        diseases = json.loads(response[0])["Diseases"]
        for disease in diseases:
            st.write(disease)

    # Store the response text, not the (response, results) tuple the
    # original appended — the history replay above calls st.markdown on
    # "content" and cannot meaningfully render a tuple.
    st.session_state.messages.append({"role": "assistant", "content": response[0]})