Commit · bbd7b76
1 Parent(s): aa4608e
Initial upload
- README.md +1 -12
- app.py +76 -0
- config.yml +13 -0
- gt-policy-bot.yml +28 -0
- llm_client.py +41 -0
- pinecone_index.py +109 -0
- requirements.txt +11 -0
- vectorise.py +69 -0
README.md
CHANGED
@@ -1,12 +1 @@
```diff
----
-title: Gt Policy Bot
-emoji: 📊
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 4.1.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# gt-policy-bot
```
app.py
ADDED
@@ -0,0 +1,76 @@
```python
import yaml

import gradio as gr

from typing import List

from llm_client import PalmClient
from pinecone_index import PinceconeIndex

SYSTEM_MESSAGE = 'Give a precise answer to the question based on only the \
context and evidence and do not be verbose.'
TOP_K = 2


def format_prompt(question: str, evidence: List[str]):
    # build a numbered evidence block to splice into the prompt
    evidence_string = ''
    for i, ev in enumerate(evidence):
        evidence_string += f'\n Evidence {i+1}: {ev}'

    content = f"{SYSTEM_MESSAGE} \
        \n ### Question: {question} \
        \n ### Evidence: {evidence_string} \
        \n ### Response:"

    return content


if __name__ == '__main__':

    config_path = 'config.yml'
    with open(config_path, 'r') as file:
        config = yaml.safe_load(file)

    print(config)

    data_path = config['paths']['data_path']
    project = config['paths']['project']

    index_name = config['pinecone']['index-name']
    embedding_model = config['sentence-transformers']['model-name']
    embedding_dimension = config['sentence-transformers'][
        'embedding-dimension']

    index = PinceconeIndex(index_name, embedding_model)
    index.connect_index(embedding_dimension, delete_existing=False)

    palm_client = PalmClient()

    def get_answer(question: str):
        evidence = index.query(question, top_k=TOP_K)
        prompt_with_evidence = format_prompt(question, evidence)
        print(prompt_with_evidence)
        response = palm_client.generate_text(prompt_with_evidence)
        # answer first, then the supporting evidence passages
        final_output = [response] + evidence

        return final_output

    context_outputs = [gr.Textbox(label=f'Evidence {i+1}')
                       for i in range(TOP_K)]
    result_output = [gr.Textbox(label='Answer')]

    gradio_outputs = result_output + context_outputs
    gradio_inputs = gr.Textbox(placeholder="Enter your question...")

    demo = gr.Interface(
        fn=get_answer,
        inputs=gradio_inputs,
        outputs=gradio_outputs,
        title="GT Student Code of Conduct Bot",
        description="Get LLM-powered answers to questions about the \
            Georgia Tech Student Code of Conduct. The evidence passages are \
            excerpts from the Code of Conduct."
    )

    demo.launch()
```
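For a sense of the prompt shape, here is a minimal sketch that calls `format_prompt` directly; the question and evidence strings are made up for illustration, and the project dependencies are assumed to be installed:

```python
from app import format_prompt

# hypothetical inputs, purely illustrative
question = 'Can students appeal a sanction?'
evidence = [
    'Students may appeal a sanction within ten business days.',
    'Appeals are reviewed by the Office of the Dean of Students.',
]

# prints the system message followed by the "### Question:",
# "### Evidence:", and "### Response:" sections the model completes
print(format_prompt(question, evidence))
```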
config.yml
ADDED
@@ -0,0 +1,13 @@
```yaml
paths:
  project: 'code_of_conduct_1'
  data_path: './data/code_of_conduct/'
  chunking: 'manual'
  auto_chunk_file: './data/code_of_conduct/code_of_conduct.csv'
  manual_chunk_file: './data/code_of_conduct/code_of_conduct_manual.csv'

pinecone:
  index-name: gt-code-of-conduct

sentence-transformers:
  model-name: thenlper/gte-base
  embedding-dimension: 768
```
gt-policy-bot.yml
ADDED
@@ -0,0 +1,28 @@
```yaml
name: gtpb
channels:
  - defaults
dependencies:
  - matplotlib
  - numpy
  - imageio
  - scikit-learn
  - notebook
  - pandas
  - scipy
  - ipywidgets
  - statsmodels
  - jupyterlab
  - plotly
  - pip
  - tqdm
  - pip:
      - kaleido
      - colab_ssh
      - gradio
      - faiss-cpu
      - pinecone-client
      - pdfminer-six
      - sentence-transformers
      - torch
      - langchain
      - python-dotenv
```
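This conda environment appears intended for local or Colab development (note `colab_ssh` and the Jupyter packages); assuming a standard conda install, it can be recreated with `conda env create -f gt-policy-bot.yml` and activated with `conda activate gtpb`, while the Space itself installs from requirements.txt.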
llm_client.py
ADDED
@@ -0,0 +1,41 @@
```python
import os

import google.generativeai as palm


class PalmClient:
    def __init__(self):
        self.connect_client()

    def connect_client(self):
        if not os.getenv('GOOGLE_PALM_KEY'):
            raise Exception('Please set your Google MakerSuite API key')

        api_key = os.getenv('GOOGLE_PALM_KEY')
        palm.configure(api_key=api_key)

        # relax every PaLM safety filter to the most permissive threshold
        safety_overrides = [
            {"category": "HARM_CATEGORY_DEROGATORY", "threshold": 4},
            {"category": "HARM_CATEGORY_TOXICITY", "threshold": 4},
            {"category": "HARM_CATEGORY_VIOLENCE", "threshold": 4},
            {"category": "HARM_CATEGORY_SEXUAL", "threshold": 4},
            {"category": "HARM_CATEGORY_MEDICAL", "threshold": 4},
            {"category": "HARM_CATEGORY_DANGEROUS", "threshold": 4}
        ]

        defaults = {
            'model': 'models/text-bison-001',
            'temperature': 0.7,
            'candidate_count': 1,
            'top_k': 40,
            'top_p': 0.95,
            'max_output_tokens': 1024,
            'stop_sequences': [],
            'safety_settings': safety_overrides,
        }

        self.defaults = defaults

    def generate_text(self, prompt: str) -> str:
        response = palm.generate_text(**self.defaults, prompt=prompt)
        return response.candidates[0]['output']
```
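A minimal usage sketch, assuming `GOOGLE_PALM_KEY` is set in the environment and `google-generativeai` is installed:

```python
from llm_client import PalmClient

client = PalmClient()  # raises if GOOGLE_PALM_KEY is not set
print(client.generate_text('Reply with one word: ready'))
```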
pinecone_index.py
ADDED
@@ -0,0 +1,109 @@
```python
import os
import pinecone
import time
import yaml

import pandas as pd

from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores.pinecone import Pinecone
from typing import List

from dotenv import load_dotenv
from pathlib import Path


class PinceconeIndex:
    def __init__(self, index_name: str, model_name: str):
        self.index_name = index_name
        self._embeddingModel = HuggingFaceEmbeddings(model_name=model_name)

    def connect_index(self, embedding_dimension: int,
                      delete_existing: bool = False):
        index_name = self.index_name

        # fall back to config.env for the Pinecone variables when
        # running within Google Colab
        if (not os.getenv('PINECONE_KEY')) or (not os.getenv('PINECONE_ENV')):
            dotenv_path = Path('/content/gt-policy-bot/config.env')
            load_dotenv(dotenv_path=dotenv_path)

        pinecone.init(
            api_key=os.getenv('PINECONE_KEY'),
            environment=os.getenv('PINECONE_ENV'),
        )

        if index_name in pinecone.list_indexes() and delete_existing:
            pinecone.delete_index(index_name)

        if index_name not in pinecone.list_indexes():
            pinecone.create_index(index_name, dimension=embedding_dimension)

        index = pinecone.Index(index_name)

        pinecone.describe_index(index_name)
        self._index = index

    def upsert_docs(self, df: pd.DataFrame, text_col: str):
        loader = DataFrameLoader(df, page_content_column=text_col)
        docs = loader.load()
        Pinecone.from_documents(docs, self._embeddingModel,
                                index_name=self.index_name)

    def get_embedding_model(self):
        return self._embeddingModel

    def get_index_name(self):
        return self.index_name

    def query(self, query: str, top_k: int = 5) -> List[str]:
        docsearch = Pinecone.from_existing_index(self.index_name,
                                                 self._embeddingModel)
        res = docsearch.similarity_search(query, k=top_k)

        return [doc.page_content for doc in res]


if __name__ == '__main__':
    config_path = 'config.yml'
    with open(config_path, 'r') as file:
        config = yaml.safe_load(file)

    print(config)

    data_path = config['paths']['data_path']
    project = config['paths']['project']
    format = '.csv'

    index_name = config['pinecone']['index-name']
    embedding_model = config['sentence-transformers']['model-name']
    embedding_dimension = config['sentence-transformers'][
        'embedding-dimension']
    delete_existing = True

    if config['paths']['chunking'] == 'manual':
        print("Using manual chunking")
        file_path_embedding = config['paths']['manual_chunk_file']
        df = pd.read_csv(file_path_embedding, header=None, names=['chunks'])
    else:
        print("Using automatic chunking")
        file_path_embedding = config['paths']['auto_chunk_file']
        df = pd.read_csv(file_path_embedding, index_col=0)

    print(df)
    start_time = time.time()
    index = PinceconeIndex(index_name, embedding_model)
    index.connect_index(embedding_dimension, delete_existing)
    index.upsert_docs(df, 'chunks')
    end_time = time.time()
    print(f'Indexing took {end_time - start_time} seconds')

    index = PinceconeIndex(index_name, embedding_model)
    index.connect_index(embedding_dimension, delete_existing=False)

    query = "When was the student code of conduct last revised?"
    res = index.query(query, top_k=5)

    # assert len(res) == 5
    print(res)
```
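A minimal query-side sketch, assuming `PINECONE_KEY` and `PINECONE_ENV` are set and the index was already populated by the `__main__` block above; the index and model names mirror config.yml:

```python
from pinecone_index import PinceconeIndex

index = PinceconeIndex('gt-code-of-conduct', 'thenlper/gte-base')
index.connect_index(embedding_dimension=768, delete_existing=False)

# returns the top-2 most similar chunks as plain strings
for passage in index.query('What is the appeals process?', top_k=2):
    print(passage)
```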
requirements.txt
ADDED
@@ -0,0 +1,11 @@
```text
pandas
pinecone-client
sentence-transformers
torch
tqdm
pdfminer-six
langchain
gradio
python-dotenv
faiss-cpu
google-generativeai
```
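For local development, `pip install -r requirements.txt` reproduces the package set the Space builds with.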
vectorise.py
ADDED
@@ -0,0 +1,69 @@
```python
import tqdm
import yaml

import numpy as np
import pandas as pd

from sentence_transformers import SentenceTransformer

BATCH_SIZE = 2


class Vectorizer:
    def __init__(self, model_name: str):
        self.model_name = model_name
        self.model = SentenceTransformer(model_name)
        self.batch_size = BATCH_SIZE

    def get_query_embedding(self, query: str) -> np.ndarray:
        return self.model.encode(query)

    def get_embeddings(self, df: pd.DataFrame, data_col: str):
        docs = df[data_col]
        num_docs = len(docs)
        embeddings = []
        # encode documents in small batches to bound memory use
        for i in tqdm.tqdm(range(0, num_docs, self.batch_size)):
            docs_batch = docs[i: i + self.batch_size].to_list()
            vectors_batch = self.model.encode(docs_batch).tolist()
            embeddings.append(vectors_batch)

        embeddings_flattened = [embedding for batch in embeddings
                                for embedding in batch]

        assert len(embeddings_flattened) == num_docs
        return embeddings_flattened

    def embed_docs(self, df: pd.DataFrame, data_col: str) -> pd.DataFrame:
        embeddings = self.get_embeddings(df, data_col)
        df['embeddings'] = embeddings

        return df


def run_vectorizer(configFilePath="config.yml"):
    with open(configFilePath, 'r') as file:
        config = yaml.safe_load(file)
    print("Config file loaded ...")
    print(config)

    data_path = config['paths']['data_path']
    project = config['paths']['project']
    format = '.csv'

    data_col_name = 'chunks'
    df = pd.read_csv(data_path + project + format)

    vectorizer = Vectorizer(config['sentence-transformers']['model-name'])
    df_embeddings = vectorizer.embed_docs(df, data_col_name)
    print("Creation of embeddings completed ...")
    print(df_embeddings.head())

    file_path_embedding = data_path + project + '_embedding' + format
    df_embeddings.to_csv(file_path_embedding)

    df_read = pd.read_csv(file_path_embedding, index_col=0)
    assert len(df_read) == len(df_embeddings)
    print(file_path_embedding + " created ...")


if __name__ == "__main__":
    run_vectorizer()
```
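A minimal sketch of using `Vectorizer` on its own, with a tiny made-up frame standing in for the real chunk CSV:

```python
import pandas as pd

from vectorise import Vectorizer

# two toy chunks standing in for real Code of Conduct excerpts
df = pd.DataFrame({'chunks': ['Section I: ...', 'Section II: ...']})

vectorizer = Vectorizer('thenlper/gte-base')
df = vectorizer.embed_docs(df, 'chunks')  # adds an 'embeddings' column
print(len(df['embeddings'][0]))  # 768, matching the gte-base dimension
```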