binqiangliu commited on
Commit
05f0071
·
1 Parent(s): b5514cf

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from llama_index import VectorStoreIndex, SimpleDirectoryReader
3
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings
4
+ from llama_index import LangchainEmbedding, ServiceContext
5
+ from llama_index import StorageContext, load_index_from_storage
6
+ from llama_index import LLMPredictor
7
+ #from transformers import HuggingFaceHub
8
+ from langchain import HuggingFaceHub
9
+ from streamlit.components.v1 import html
10
+ from pathlib import Path
11
+ from time import sleep
12
+ import random
13
+ import string
14
+
15
+ import os
16
+ from dotenv import load_dotenv
17
+ load_dotenv()
18
+
19
+ import timeit
20
+
21
# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="Open AI Doc-Chat Assistant", layout="wide")
st.subheader("Open AI Doc-Chat Assistant: Life Enhancing with AI!")

# Inject the app stylesheet. encoding="utf-8" makes the read independent of
# the host locale (the previous locale-default open() could raise
# UnicodeDecodeError on platforms whose default codec is not UTF-8).
css_file = "main.css"
with open(css_file, encoding="utf-8") as f:
    st.markdown("<style>{}</style>".format(f.read()), unsafe_allow_html=True)

# Hugging Face Hub token, read from the environment (populated by load_dotenv()
# earlier in this file); consumed implicitly by the HuggingFaceHub client.
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Documents parsed from the uploaded PDFs; filled in by the sidebar code below.
documents = []
31
+
32
def generate_random_string(length):
    """Return a random string of lowercase ASCII letters of the given length."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))

# One random 20-char name per script run, used as this session's upload folder.
random_string = generate_random_string(20)
directory_path = random_string
37
+
38
with st.sidebar:
    st.subheader("Upload your Documents Here: ")
    pdf_files = st.file_uploader("Choose your PDF Files and Press OK", type=['pdf'], accept_multiple_files=True)
    if pdf_files:
        # exist_ok=True: Streamlit re-executes this script on every widget
        # interaction, so the directory may already exist -- without the flag
        # a rerun crashes with FileExistsError.
        # NOTE(review): directory_path is regenerated randomly on each rerun,
        # so earlier uploads may land in a different folder -- consider
        # st.session_state to pin one directory per session; verify.
        os.makedirs(directory_path, exist_ok=True)
        for pdf_file in pdf_files:
            # Persist each uploaded PDF to disk so SimpleDirectoryReader
            # can ingest it below.
            file_path = os.path.join(directory_path, pdf_file.name)
            with open(file_path, 'wb') as f:
                f.write(pdf_file.read())
            st.success(f"File '{pdf_file.name}' saved successfully.")

try:
    start_1 = timeit.default_timer()  # Start timer
    st.write(f"QA文档加载开始:{start_1}")
    # Parse every file in the upload directory into llama_index documents.
    documents = SimpleDirectoryReader(directory_path).load_data()
    end_1 = timeit.default_timer()  # Stop timer
    st.write(f"QA文档加载结束:{end_1}")
    st.write(f"QA文档加载耗时:{end_1 - start_1}")
except Exception:
    # Best effort: before any files are uploaded the directory does not exist
    # yet, so loading fails on the first run; keep `documents` empty and wait.
    print("waiting for path creation.")
58
+
59
+ # Load documents from a directory
60
+ #documents = SimpleDirectoryReader('data').load_data()
61
+
62
# --- Embedding model --------------------------------------------------------
# Wrap a LangChain sentence-transformers embedder so llama_index can use it.
start_2 = timeit.default_timer() # Start timer
st.write(f"向量模型加载开始:{start_2}")
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2'))
end_2 = timeit.default_timer() # Stop timer
st.write(f"向量模型加载加载结束:{end_2}")
st.write(f"向量模型加载耗时:{end_2 - start_2}")

# Remote LLM served by the Hugging Face Inference API (starchat-beta).
# NOTE(review): eos_token_id 49155 presumably maps to starchat's <|end|>
# marker (the answer is truncated at '<|end|>' further below) -- confirm.
llm_predictor = LLMPredictor(HuggingFaceHub(repo_id="HuggingFaceH4/starchat-beta", model_kwargs={"min_length":100, "max_new_tokens":1024, "do_sample":True, "temperature":0.2,"top_k":50, "top_p":0.95, "eos_token_id":49155}))

# Bundle the LLM and the embedder; passed to every index operation below.
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
72
+
73
start_3 = timeit.default_timer()  # Start timer
st.write(f"向量库构建开始:{start_3}")
# Embed the loaded documents and build an in-memory vector index.
new_index = VectorStoreIndex.from_documents(
    documents,
    service_context=service_context,
)
end_3 = timeit.default_timer()  # Stop timer
st.write(f"向量库构建结束:{end_3}")
st.write(f"向量库构建耗时:{end_3 - start_3}")

# Persist to -- and reload from -- this session's own upload directory.
# Bug fix: the original passed the string literal "directory_path", so every
# session wrote to one shared fixed folder, defeating the random per-session
# directory created above.
new_index.storage_context.persist(directory_path)

storage_context = StorageContext.from_defaults(persist_dir=directory_path)

start_4 = timeit.default_timer()  # Start timer
st.write(f"向量库装载开始:{start_4}")
loadedindex = load_index_from_storage(storage_context=storage_context, service_context=service_context)
end_4 = timeit.default_timer()  # Stop timer
st.write(f"向量库装载结束:{end_4}")
st.write(f"向量库装载耗时:{end_4 - start_4}")
93
+
94
# Question-answering interface over the reloaded index.
query_engine = loadedindex.as_query_engine()

user_question = st.text_input("Enter your query here:")
# Only query for non-empty, non-whitespace input. The original chained five
# redundant emptiness/whitespace checks; str.strip() truthiness covers all.
if user_question.strip():
    print("user question: " + user_question)
    with st.spinner("AI Thinking...Please wait a while to Cheers!"):
        start_5 = timeit.default_timer()  # Start timer
        st.write(f"Query Engine - AI QA开始:{start_5}")
        initial_response = query_engine.query(user_question)
        # starchat appends an <|end|> marker; keep only the text before it.
        temp_ai_response = str(initial_response)
        final_ai_response = temp_ai_response.partition('<|end|>')[0]
        end_5 = timeit.default_timer()  # Stop timer (added: every other stage reports its duration)
        st.write(f"Query Engine - AI QA结束:{end_5}")
        st.write(f"Query Engine - AI QA耗时:{end_5 - start_5}")
        print("AI Response:\n" + final_ai_response)
        st.write("AI Response:\n\n" + final_ai_response)