iAIChat committed on
Commit
457ee8f
·
1 Parent(s): 96b3d7b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -0
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Doc-chat assistant: index local documents with llama_index and answer
# questions through a HuggingFace-hosted LLM inside a Streamlit UI.
# NOTE(review): uses the pre-0.10 llama_index flat import layout
# (ServiceContext / LLMPredictor) and the legacy `langchain.HuggingFaceHub`
# entry point — pin matching library versions or these imports break.
import streamlit as st
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext
from llama_index import StorageContext, load_index_from_storage
from llama_index import LLMPredictor
#from transformers import HuggingFaceHub
from langchain import HuggingFaceHub
#from streamlit.components.v1 import html
from pathlib import Path
from time import sleep
import random
import string

import os
from dotenv import load_dotenv
# Pull HUGGINGFACEHUB_API_TOKEN (and any other secrets) from a local .env file.
load_dotenv()

st.set_page_config(page_title="Open AI Doc-Chat Assistant", layout="wide")
st.subheader("Open AI Doc-Chat Assistant: Life Enhancing with AI!")

#css_file = "main.css"
#with open(css_file) as f:
# st.markdown("<style>{}</style>".format(f.read()), unsafe_allow_html=True)

# Read but never passed on explicitly — presumably HuggingFaceHub picks the
# token up from the environment itself; verify before removing. TODO confirm.
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Load every file under ./data as a document corpus for indexing.
# NOTE(review): this runs on EVERY Streamlit rerun (each user interaction)
# with no caching — expensive for large corpora.
documents = SimpleDirectoryReader('data').load_data()

# Local sentence-transformers model for embeddings (no API calls for vectors).
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2'))

# Remote starchat-beta endpoint does the generation; eos_token_id 49155 is
# presumably starchat's <|end|> token — confirm against the model card.
llm_predictor = LLMPredictor(HuggingFaceHub(repo_id="HuggingFaceH4/starchat-beta", model_kwargs={"min_length":100, "max_new_tokens":1024, "do_sample":True, "temperature":0.2,"top_k":50, "top_p":0.95, "eos_token_id":49155}))

# Bundle LLM + embedder so both index build and query use the same stack.
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
37
def generate_random_string(length):
    """Return `length` pseudo-random lowercase ASCII letters as one string.

    Not cryptographically secure (uses `random`); fine for throwaway
    directory names.
    """
    picked = []
    for _ in range(length):
        picked.append(random.choice(string.ascii_lowercase))
    return ''.join(picked)
40
# Per-run directory name for persisting the index to disk.
random_string = generate_random_string(20)

# Build the vector index over the loaded documents.
new_index = VectorStoreIndex.from_documents(
    documents,
    service_context=service_context,
)

# BUG FIX: the original passed the LITERAL "random_string" here and below,
# so the generated random name was never used and every run shared one
# directory. Persist under the generated name, then reload from the same
# place (round-trips the save/load path).
new_index.storage_context.persist(random_string)

storage_context = StorageContext.from_defaults(persist_dir=random_string)
loadedindex = load_index_from_storage(storage_context=storage_context, service_context=service_context)
query_engine = loadedindex.as_query_engine()

# Streamlit reruns the whole script on every interaction, so a single pass
# replaces the original `while True` loop — its second iteration would have
# re-created the same text_input widget and raised DuplicateWidgetID, which
# the blanket except then silently converted into st.stop().
try:
    question = st.text_input("Enter your query here:")
    print("Your query:\n" + question)
    if question == "":
        # First render / nothing typed yet: stay silent (matches original).
        pass
    elif question.strip() == "":
        # Whitespace-only input: prompt for a real query.
        st.write("Query Empty. Please enter valid query first.")
    elif question == "exit":
        # Explicit opt-out: do nothing this run.
        pass
    else:
        initial_response = query_engine.query(question)
        # starchat emits an <|end|> marker; keep only the text before it.
        temp_ai_response = str(initial_response)
        final_ai_response = temp_ai_response.partition('<|end|>')[0]
        print("AI Response:\n" + final_ai_response)
        st.write("AI Response:\n\n" + final_ai_response)
except Exception:
    # Surface the failure instead of dying silently, then halt this run.
    st.error("Query failed — see server logs for details.")
    st.stop()