from flask import Flask, request
import os
import requests
from langchain.vectorstores import Chroma
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.chat_models import ChatOpenAI

import numpy
import torch
import json
import textwrap
import socket

from flask_cors import CORS
import gradio as gr

app = Flask(__name__)
cors = CORS(app)


def get_local_ip():
    # Open a UDP socket towards a public address to discover which local
    # interface the OS would route through; no packets are actually sent.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()

def wrap_text_preserve_newlines(text, width=110):
    # Split the input text into lines based on newline characters
    lines = text.split('\n')
    # Wrap each line individually
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    # Join the wrapped lines back together using newline characters
    wrapped_text = '\n'.join(wrapped_lines)
    return wrapped_text
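# Example (a sketch, not from the original file): explicit newlines survive
# while each line is reflowed at the given width.
# wrap_text_preserve_newlines("alpha beta\n" + "x" * 115) returns "alpha beta",
# then a run of 110 x's, then the remaining 5 x's, each on its own line.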

def process_llm_response(llm_response):
    # Shape the chain output into a JSON-serialisable payload:
    # {'result': <wrapped answer>, 'sources': [{'book': ..., 'page': ...}]}
    response_data = {
        'result': wrap_text_preserve_newlines(llm_response['result']),
        'sources': []
    }
    print(wrap_text_preserve_newlines(llm_response['result']))
    print('\n\nSources:')
    for source in llm_response["source_documents"]:
        print(source.metadata['source'] + " Page Number: " + str(source.metadata['page']))
        response_data['sources'].append({"book": source.metadata['source'], "page": source.metadata['page']})
    # return json.dumps(response_data)
    return response_data


# Legacy Flask endpoint, superseded by the Gradio click handler below (which
# exposes the same function at api_name="question"); kept for reference.
# @app.route('/question', methods=['POST'])
# def answer():
#     content_type = request.headers.get('Content-Type')
#     if content_type == 'application/json':
#         data = request.json
#         question = data['question']
#         response = get_answer(question)
#         return response
#     else:
#         return 'Content-Type not supported!'


ip = get_local_ip()  # currently unused; could back demo.launch(server_name=ip)
# Never commit a real key; read it from the environment instead.
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "<your-openai-api-key>")
# Embed and store the texts
# if(torch.cuda.is_available() == False):
#     print("No GPU available")
#     exit(1)

# Cap the CUDA caching allocator's split size to reduce fragmentation; the
# allocator reads this env var, and it must be set before any CUDA call.
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:100")
torch.cuda.empty_cache()
instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
                                                      model_kwargs={"device": "cpu"})
# Supplying a persist_directory will store the embeddings on disk
persist_directory = 'db'
vectordb2 = Chroma(persist_directory=persist_directory,
                   embedding_function=instructor_embeddings)
retriever = vectordb2.as_retriever(search_kwargs={"k": 3})
vectordb2.persist()
# Set up the turbo LLM
turbo_llm = ChatOpenAI(
    temperature=0,
    model_name='gpt-3.5-turbo'
)
qa_chain = RetrievalQA.from_chain_type(llm=turbo_llm,
                                       chain_type="stuff",
                                       retriever=retriever,
                                       return_source_documents=True)
# Override the system prompt of the "stuff" chain so the model answers
# strictly from the retrieved context.
qa_chain.combine_documents_chain.llm_chain.prompt.messages[0].prompt.template = """

Use only the following pieces of context. Answer the user's question only if it is related to the given context.

If you don't know the answer, just say that you don't know; don't try to make up an answer. Make your answer very detailed and long.

Use bullet points to explain when required.

Use only text found in the context as your knowledge source for the answer.

----------------

{context}"""
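
# Optional smoke test (a sketch, not part of the original app): exercise the
# chain directly before the UI is wired up. QA_SMOKE_TEST is a hypothetical
# flag introduced here, and the question is an arbitrary placeholder.
if os.environ.get("QA_SMOKE_TEST"):
    smoke = qa_chain("Describe the boundaries of the femoral triangle.")
    print(smoke["result"])
    print([doc.metadata for doc in smoke["source_documents"]])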

def book_url(book):
    # Map source file names to their URL-encoded S3 object keys. The keys do
    # not follow one consistent encoding scheme, so they are listed explicitly.
    urls = {
        "BD Human Anatomy - Lower Limb, Abdomen & Pelvis (Volume 2).pdf":
            "BD+Human+Anatomy+-+Lower+Limb%2C+Abdomen+%26+Pelvis+(Volume+2).pdf",
        "BD Human Anatomy - Upper Limb & Thorax (Volume 1).pdf":
            "BD+Human+Anatomy+-+Upper+Limb++Thorax+(Volume+1).pdf",
        "[Richard S.Snell] Clinical Neuroanatomy (7th Ed.)":
            "%5BRichard+S.Snell%5D+Clinical+Neuroanatomy+(7th+Ed.).pdf",
        "BD Chaurasia's Handbook of General Anatomy, 4th Edition.pdf":
            "BD+Chaurasia's+Handbook+of+General+Anatomy%2C+4th+Edition.pdf",
        # NOTE: this entry points at the BD Chaurasia key, which looks like a
        # copy-paste slip; the correct key for this title is not known here.
        "Vishram Singh Textbook of Anatomy  Upper Limb and Thorax..pdf":
            "BD+Chaurasia's+Handbook+of+General+Anatomy%2C+4th+Edition.pdf",
        "Vishram Singh Textbook of Anatomy Vol 2.pdf":
            "Vishram+Singh+Textbook+of+Anatomy+Vol+2.pdf",
        "BD Human Anatomy - Head, Neck & Brain (Volume 3).pdf":
            "BD+Human+Anatomy+-+Head%2C+Neck+%26+Brain+(Volume+3).pdf",
        "Textbook of Clinical Neuroanatomy.pdf":
            "Textbook+of+Clinical+Neuroanatomy.pdf",
        "Vishram Singh Textbook of Anatomy Vol 3.pdf":
            "Vishram+Singh+Textbook+of+Anatomy+Vol+3.pdf",
    }
    return urls.get(book)  # None for unknown titles, as before


def print_array(arr):
    # Convert the array to a string representation (currently unused helper)
    arr_str = str(arr)
    return arr_str

def html_link_generator(book, page):
    # Build a deep link into the PDF hosted on S3, opened at the cited page.
    bookurl = book_url(book)
    url = f"https://diagrams1.s3.ap-south-1.amazonaws.com/anatomybooks/{bookurl}#page={page}"
    # html = f'<iframe src="{url}" width="800" height="600"></iframe>'
    return url
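
# Example (sketch), using a title from the map above:
# html_link_generator("Textbook of Clinical Neuroanatomy.pdf", 12)
# -> "https://diagrams1.s3.ap-south-1.amazonaws.com/anatomybooks/Textbook+of+Clinical+Neuroanatomy.pdf#page=12"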

def getanswer(question):
    if question == "":
        return "Please ask a question", []
    llm_response = qa_chain(question)
    response = process_llm_response(llm_response)
    if response["sources"]:
        # [22:] strips the fixed-length directory prefix from the stored source
        # path; the resulting deep link is not yet surfaced in the UI.
        html = html_link_generator(response["sources"][0]["book"][22:], response["sources"][0]["page"])
    # html = """<iframe src="https://diagrams1.s3.ap-south-1.amazonaws.com/anatomybooks/BD+Chaurasia's+Handbook+of+General+Anatomy%2C+4th+Edition.pdf#page=40" width="800" height="600"></iframe>"""
    return response["result"], response['sources']

def makevisible(source1, source2, source3):
    # Intended for the commented-out `sources.change` wiring below: reveals the
    # three source buttons once an answer arrives. Not currently hooked up.
    return {
        source1: gr.update(visible=True),
        source2: gr.update(visible=True),
        source3: gr.update(visible=True)
    }


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1, min_width=600):
            question = gr.Textbox(label="Question")
            submitbtn = gr.Button("Submit").style(full_width=True)
            answer = gr.Textbox(label="Answer", interactive=False)
            sources = gr.JSON(label="Sources")
            # gr.Button takes its caption as `value` (the first positional
            # argument); a `label=` kwarg is not rendered on buttons.
            source1 = gr.Button("Source 1", visible=False)
            source2 = gr.Button("Source 2", visible=False)
            source3 = gr.Button("Source 3", visible=False)

            submitbtn.click(fn=getanswer, inputs=[question], outputs=[answer, sources], api_name="question")
            # source1.click(fn=None, _js=f"""window.open('"""+"""', target="_blank");""")
            # sources.change(make_source_buttons, [sources, source1, source2, source3], [source1,source2,source3])

demo.launch()
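
# Example client call (a sketch, not part of the original app): because the
# click handler above sets api_name="question", Gradio 3.x also exposes it as
# an HTTP endpoint at /run/question. Host and port below are Gradio's defaults
# and are assumptions; run this from another process while the app is up.
#
#   import requests
#   r = requests.post(
#       "http://127.0.0.1:7860/run/question",
#       json={"data": ["What is the femoral triangle?"]},
#   )
#   answer_text, sources = r.json()["data"]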