Spaces:
Sleeping
Sleeping
pikachoof
committed on
Commit
·
d796e74
0
Parent(s):
Initial commit
Browse files- .env +1 -0
- .gitattributes +39 -0
- .gitignore +0 -0
- .gitignore~ +1 -0
- .gradio/certificate.pem +31 -0
- .gradio/flagged/dataset1.csv +2 -0
- README.md +14 -0
- app.py +188 -0
- docs/test_file.pdf +3 -0
- requirements.txt +9 -0
- sample_responses.txt +0 -0
- stores/openai/index.faiss +3 -0
- stores/openai/index.pkl +3 -0
- test.py +5 -0
.env
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
OPENAI_API_KEY=<REDACTED — a live OpenAI secret key was committed here in a public repository. Revoke this key immediately and supply it via environment variables / Space secrets instead of a tracked .env file.>
|
.gitattributes
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
docs/test_file.pdf filter=lfs diff=lfs merge=lfs -text
|
37 |
+
stores/openai/* filter=lfs diff=lfs merge=lfs -text
|
38 |
+
stores/openai/index.faiss filter=lfs diff=lfs merge=lfs -text
|
39 |
+
stores/openai/index.pkl filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
File without changes
|
.gitignore~
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.env
|
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
-----BEGIN CERTIFICATE-----
|
2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
31 |
+
-----END CERTIFICATE-----
|
.gradio/flagged/dataset1.csv
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
query,output,timestamp
|
2 |
+
,,2025-08-17 17:23:18.609639
|
README.md
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: AIGINEER LLM
|
3 |
+
emoji: 👁
|
4 |
+
colorFrom: green
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.42.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: mit
|
11 |
+
short_description: RAG LLM for the AIGINEER company documentation
|
12 |
+
---
|
13 |
+
|
14 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from langchain_community.document_loaders import PyPDFLoader
|
3 |
+
from langchain_community.vectorstores import FAISS
|
4 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
5 |
+
from langchain.prompts import PromptTemplate
|
6 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
7 |
+
from langchain_huggingface.llms import HuggingFacePipeline
|
8 |
+
from langchain_core.runnables import RunnablePassthrough
|
9 |
+
from langchain_core.output_parsers import StrOutputParser
|
10 |
+
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
|
11 |
+
import os
|
12 |
+
from dotenv import load_dotenv
|
13 |
+
import tiktoken
|
14 |
+
|
15 |
+
# Load environment variables (expects OPENAI_API_KEY) from a local .env file.
load_dotenv()
#HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

#embeddings_model_name = "cointegrated/rubert-tiny2"
# NOTE(review): this constant is never used — get_vector_store() hardcodes
# "text-embedding-3-small" instead. Confirm which model the saved FAISS
# index was actually built with and unify.
embeddings_model_name = "text-embedding-3-large"

# Chat model used for answer generation (also used by count_tokens).
llm_model_name = "gpt-4o-mini"

# Directory where the FAISS index is persisted between runs.
store_save_path = "stores/openai"
|
24 |
+
|
25 |
+
# Step 1: Document Loading and Splitting
def load_and_split_documents(pdf_path="docs/test_file.pdf"):
    """Load a PDF and split it into overlapping chunks for retrieval.

    Args:
        pdf_path: Path to the PDF file to index.

    Returns:
        A list of LangChain Document chunks (800 chars, 200-char overlap).
    """
    pages = PyPDFLoader(pdf_path).load()
    # Overlap keeps sentences that straddle a chunk boundary retrievable.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=200,
    )
    return splitter.split_documents(pages)
|
39 |
+
|
40 |
+
# Step 2: Embeddings and Vector Store
def get_vector_store(docs, store_save_path=store_save_path):
    """Load the persisted FAISS index, or build and persist a new one.

    Args:
        docs: Pre-split documents; used only when the index must be (re)built.
        store_save_path: Directory holding index.faiss / index.pkl.

    Returns:
        A FAISS vector store ready to be used as a retriever.
    """
    # NOTE(review): the module constant embeddings_model_name is
    # "text-embedding-3-large", but this function embeds with
    # "text-embedding-3-small". The embedding model MUST match the one the
    # saved index was built with, so the hardcoded value is kept — confirm
    # and unify with the constant.
    embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

    if os.path.exists(store_save_path):
        print("Loading vector store from disk...")
        # allow_dangerous_deserialization is required because FAISS metadata
        # is pickled; acceptable only because this index was created locally.
        db = FAISS.load_local(store_save_path, embeddings, allow_dangerous_deserialization=True)
    else:
        print("Creating a new vector store...")
        db = FAISS.from_documents(docs, embeddings)
        db.save_local(store_save_path)
    return db
|
55 |
+
|
56 |
+
# Step 3: Initialize the LLM
def initialize_llm():
    """Create the chat model used to generate answers.

    Returns:
        A ChatOpenAI instance for the model named by ``llm_model_name``.
    """
    # NOTE(review): earlier experiments used local/hosted HuggingFace models
    # (ai-forever/rugpt3large_based_on_gpt2, ruBert-base, ruGPT-3.5-13B via
    # HuggingFaceEndpoint); that dead commented-out code was removed in
    # favour of the hosted OpenAI chat model.
    return ChatOpenAI(
        model=llm_model_name,
        temperature=0.7,  # moderately creative answers
    )
|
80 |
+
|
81 |
+
# Step 4: Create the LCEL RAG Chain
def setup_rag_chain(pdf_path):
    """Build the full retrieval-augmented generation pipeline.

    Loads and chunks the PDF, obtains a FAISS-backed retriever and the chat
    model, and wires them together with a Russian-language prompt using the
    LangChain Expression Language (LCEL).

    Args:
        pdf_path: PDF document to answer questions about.

    Returns:
        A runnable chain mapping a question string to an answer string.
    """
    chunks = load_and_split_documents(pdf_path)
    vector_store = get_vector_store(chunks)
    retriever = vector_store.as_retriever()
    llm = initialize_llm()

    # Debug aid: vector count can be checked via vector_store.index.ntotal.

    # Prompt (Russian): answer strictly from the supplied context, admit
    # ignorance rather than invent, and stay polite.
    template = """Используйте следующие фрагменты контекста, чтобы ответить на вопрос в конце. Если вы не знаете ответа, просто скажите, что не знаете, не пытайтесь что-то придумать. Всегда будьте вежливым.

{context}

Вопрос: {question}

Полезный ответ:"""
    prompt = PromptTemplate.from_template(template)

    # The retriever fills {context}; the raw question passes through
    # unchanged into {question}.
    return (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
|
114 |
+
|
115 |
+
# Initialize the chain
# NOTE(review): this runs at import time — it loads (or builds) the FAISS
# index and may call the OpenAI embeddings API before the UI even starts.
# Consider moving it behind a function or the __main__ guard.
document_name = "docs/test_file.pdf"
qa_chain = setup_rag_chain(pdf_path=document_name)
|
118 |
+
|
119 |
+
# Gradio Interface
def chat_with_doc(query):
    """Run one user query through the RAG chain.

    Args:
        query: Free-text question about the indexed document.

    Returns:
        The model's answer, or a human-readable error description.
    """
    try:
        # The chain expects the bare question string, not a dict.
        return qa_chain.invoke(query)
    except Exception as e:
        # Boundary handler: surface the failure in the UI instead of
        # crashing the app.
        return f"Произошла ошибка: {type(e).__name__} - {e!r}"
|
130 |
+
|
131 |
+
def count_tokens(text, model_name):
    """Return the number of tokens *text* occupies for *model_name*.

    Args:
        text: String to tokenize.
        model_name: An OpenAI model name known to tiktoken.

    Returns:
        Token count as an int.
    """
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # tiktoken raises KeyError for model names it does not recognize;
        # fall back to cl100k_base, the encoding used by current GPT models.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))
|
135 |
+
|
136 |
+
# NOTE(review): this Interface is never launched — only the gr.Blocks app
# ("demo") defined later is launched. This object appears to be dead code;
# confirm and remove.
iface = gr.Interface(
    fn=chat_with_doc,
    inputs=gr.Textbox(lines=5, placeholder="Спросите что-нибудь о документе..."),
    outputs="text",
    title="RAG LLM модель для AIGINEER",
    description="Задайте вопрос о содержании документации",
)
|
143 |
+
|
144 |
+
# Styling for the gr.Blocks UI: green submit button, centered headings,
# scrollable fixed-height answer area.
css_code = """
#submit-button {
    background-color: #4CAF50 !important;
    color: white !important;
}

#centered-text {
    text-align: center;
    /* justify-content: center; */
}

#fixed-height-textarea textarea {
    overflow-y: auto !important;
}
"""

# Page heading (Markdown) and subheading shown above the chat widgets.
heading_text = "# AIGINEER-ИИ Модель"
subheading_text = 'Узнайте любую информацию о нормативно-технической документации (НТД) со 100% точностью при помощи ИИ модели AIGINEER'
|
162 |
+
|
163 |
+
# Main UI: question box with clear/submit buttons, answer area below.
with gr.Blocks(css=css_code) as demo:
    gr.Markdown(heading_text, elem_id='centered-text')
    gr.Markdown(subheading_text, elem_id='centered-text')
    # NOTE(review): confirm gr.Row accepts `scale` in the pinned Gradio
    # version; newer releases put scale on the child components instead.
    with gr.Row(scale=1):
        with gr.Column():
            query_input = gr.Textbox(interactive=True, label='Вопрос', lines=5, placeholder="Спросите что-нибудь о документе...")
            with gr.Row():
                clear_button = gr.ClearButton(components=[query_input], variant='secondary', value='Очистить')
                submit_button = gr.Button(variant='primary', value='Отправить')
        # Token-cost widgets kept disabled for future use.
        #with gr.Column():
        #    count_tokens_output = gr.TextArea(interactive=False, label='Стоимость запроса в токенах')
        #    count_tokens_button = gr.Button(variant='secondary', value='Посчитать стоимость в токенах')
        response_output = gr.TextArea(interactive=True, label='Ответ', lines=8, placeholder='Тут будет отображаться ответ.')

    # Wire the submit button to the RAG chain.
    submit_button.click(fn=chat_with_doc, inputs=query_input, outputs=response_output)
    #count_tokens_button.click(fn=lambda text_input: count_tokens(text_input, llm_model_name), inputs=[query_input], outputs=[count_tokens_output])
|
179 |
+
|
180 |
+
# Launch the Gradio app
if __name__ == "__main__":
    # Uncomment to run as CLI (one-shot question/answer in the terminal)
    #query = input(f"Спросите что нибудь о документе {document_name}: ")
    #result = chat_with_doc(query)
    #print(result)

    # Run Gradio app (blocks until the server is stopped).
    demo.launch()
|
docs/test_file.pdf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d09c4619a621b34f7245d18b681fa78a2c33904e7b5a9eb9e2d7c6381c5573dd
|
3 |
+
size 1248822
|
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
langchain-community
|
3 |
+
langchain-core
|
4 |
+
langchain-huggingface
|
5 |
+
faiss-cpu
|
6 |
+
pypdf
|
7 |
+
sentence-transformers
|
8 |
+
huggingface-hub
|
9 |
+
langchain-openai
|
sample_responses.txt
ADDED
File without changes
|
stores/openai/index.faiss
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cc3ab998c61db77f7d7cb6170c9b56bdf1ad71add86ba86274a5b6171966c5d3
|
3 |
+
size 1222701
|
stores/openai/index.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:87e22ec5ccf7497c865e0993b818d49e27a6140e94d2ab4210bc547fbc196fdc
|
3 |
+
size 284848
|
test.py
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Smoke test: run a small Russian GPT-2 model locally via transformers.
# NOTE(review): downloads the model on first run; not wired into any test
# framework — run manually with `python test.py`.
from transformers import pipeline

generator = pipeline("text-generation", model="ai-forever/rugpt3large_based_on_gpt2")
# max_length counts prompt tokens plus generated tokens.
result = generator("Сколько будет 2+2?", max_length=30)
print(result[0]["generated_text"])
|