import gradio as gr
import spaces  # Hugging Face Spaces ZeroGPU helper, needed for the @spaces.GPU decorator below

# Chatbot: LLM, embeddings, vector store, and RAG chain
from langchain.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, pipeline

from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings

from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA

from textwrap import fill

DATA_PATH='data/'
DB_FAISS_PATH='vectorstore/db_faiss'
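
# The FAISS index at DB_FAISS_PATH is assumed to have been built offline from the
# documents in DATA_PATH. A minimal sketch of such an ingestion step is shown below;
# it is never called here, and the loader and splitter choices (PyPDFLoader, chunk
# sizes) are assumptions for illustration, not part of this app.
def build_vectorstore(data_path=DATA_PATH, db_path=DB_FAISS_PATH):
    from langchain.document_loaders import DirectoryLoader, PyPDFLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    # Load every PDF under data_path and split it into overlapping chunks.
    docs = DirectoryLoader(data_path, glob="*.pdf", loader_cls=PyPDFLoader).load()
    chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(docs)

    # Embed the chunks and persist the FAISS index to disk.
    db = FAISS.from_documents(chunks, HuggingFaceEmbeddings())
    db.save_local(db_path)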

# Load the quantized (GPTQ) Llama 2 chat model
model_name = "TheBloke/Llama-2-13b-Chat-GPTQ"

model = AutoModelForCausalLM.from_pretrained(model_name,
                                             device_map="auto",
                                             trust_remote_code=True)

tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

gen_cfg = GenerationConfig.from_pretrained(model_name)
gen_cfg.max_new_tokens = 512
gen_cfg.temperature = 0.0000001  # near-zero: effectively greedy decoding (exactly 0.0 is rejected when do_sample=True)
gen_cfg.return_full_text = True
gen_cfg.do_sample = True
gen_cfg.repetition_penalty = 1.11

pipe = pipeline(
    task="text-generation",
    model=model,
    tokenizer=tokenizer,
    generation_config=gen_cfg
)
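
# Optional smoke test of the raw pipeline before wiring it into LangChain
# (illustrative query only; the pipeline returns a list of dicts with "generated_text"):
# print(pipe("What is a balance sheet?")[0]["generated_text"])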


# gr.NO_RELOAD guards code that should run once at startup, not on every hot-reload
if gr.NO_RELOAD:
    llm = HuggingFacePipeline(pipeline=pipe)
    embeddings = HuggingFaceEmbeddings()
    # Note: recent LangChain versions also require allow_dangerous_deserialization=True here
    db = FAISS.load_local(DB_FAISS_PATH, embeddings)
    print('Vector store loaded OK')


DESCRIPTION = """
## 🦜🔗 Flint, your FinanceBot: get instant insights from your finance documents

This chatbot is built with the Retrieval-Augmented Generation (RAG) framework.
"""



prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know; don't try to make up an answer.

Context: {context}

Question: {question}

Only return the helpful answer below and nothing else. Keep it short: 500 words at most.

Helpful answer:
"""


prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

chain_pdf = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    # Similarity search is the default retrieval mode; set search_type="mmr" for MMR,
    # or search_type="similarity_score_threshold" with search_kwargs such as
    # {'k': 5, 'score_threshold': 0.8} to cap the number of returned documents
    # (k, default 4) and enforce a minimum relevance score.
    # return_source_documents=True would also return the documents used for the answer.
    retriever=db.as_retriever(),
    chain_type_kwargs={"prompt": prompt},
)
# Sanity check:
# query = "When was the solar system formed?"
# result = chain_pdf.invoke(query)
# print(fill(result['result'].strip(), width=100))


@spaces.GPU()
def final_result(query, history):
    # The RAG chain is module-level state; Gradio additional_inputs must be UI
    # components, so the chain is not passed in as a function argument.
    result = chain_pdf.invoke(query)
    answer = result['result'].strip()
    print(fill(answer, width=100))
    return answer  # ChatInterface expects a string response, not the result dict

with gr.Blocks() as demo:
    gr.ChatInterface(
        final_result,
        description=DESCRIPTION,
    )

demo.launch()