# # we'll need a few dependencies before we can do this
# #!pip install chromadb -q

# from langchain.vectorstores import Chroma

# persist_directory = "vector_db"

# vectordb = Chroma.from_documents(documents=documents, embedding=embeddings, persist_directory=persist_directory)

# """Now we can persist our Chroma vector store - and then show an example of how you would load that persisted vector store."""

# vectordb.persist()
# vectordb = None

# """As you can see when you run the following cell - loaded the persisted vectore store is *much* quicker than reinstantiating it - and that is the benefit of `persist_directory`!"""

# vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
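
# # Optional sanity check (an illustrative addition, not part of the original notebook): querying
# # the reloaded store directly should return relevant chunks if the persisted index loaded correctly.
# sanity_docs = vectordb.similarity_search("Who was Hamlet's Mother?", k=2)
# print(sanity_docs[0].page_content[:200])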

# """Now that we have our docs set-up - we're ready to progress to the next part of the LangChain applciation puzzle!

# ### Tool Chain

# Now we can leverage our `doc_retriever` as a tool in our LangChain application!

# We'll be utilizing the BLOOMZ-1b7 model as our LLM today - so we can expect that our results will be less effective than if we used OpenAI's gpt-3.5-turbo, but the advantage is that no information will escape outside of our Colab environment.

# First up, let's load our model!
# """

# from langchain import HuggingFacePipeline

# llm = HuggingFacePipeline.from_model_id(
#     model_id="bigscience/bloomz-1b7",
#     task="text-generation",
#     model_kwargs={"temperature" : 0, "max_length" : 500})
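
# # Quick smoke test (an illustrative addition, not part of the original notebook): the
# # pipeline-backed LLM can be called directly with a prompt string before it joins a chain.
# print(llm("Question: Who wrote the play Hamlet?\nAnswer:"))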

# """Now let's set up our document vector store as a Retriever tool so we can leverage it in our chain!"""

# doc_retriever = vectordb.as_retriever()
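
# # For illustration (added, not part of the original notebook): the retriever exposes
# # get_relevant_documents, returning the chunks the QA chain will stuff into the LLM prompt.
# example_chunks = doc_retriever.get_relevant_documents("Who was Hamlet's Mother?")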

# """### Final Chain

# With that set up, we're ready to set up our final RetrievalQA chain and leverage all the documents we have in our Vector DB!
# """

# from langchain.chains import RetrievalQA

# shakespeare_qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=doc_retriever)

# """Let's test it out by itself!"""

# #shakespeare_qa.run("Who was Hamlet's Mother?")

# """### Conclusion

# Here we have it! 

# A system capable of querying over multiple documents - all without ever needing to hit an external API!
# """

# def make_inference(query):
#     # The RetrievalQA chain handles retrieval internally, so we pass the query straight through
#     return shakespeare_qa.run(query)

# if __name__ == "__main__":
#     # make a gradio interface
#     import gradio as gr

#     gr.Interface(
#         make_inference,
#         [
#             gr.Textbox(lines=2, label="Query"),
#         ],
#         gr.Textbox(label="Response"),
#         title="🗣️TalkToMyDoc📄",
#         description="🗣️TalkToMyDoc📄 is a tool that allows you to ask questions about a document. In this case - The Hitchhiker's Guide to the Galaxy.",
#     ).launch()
import gradio as gr

# Placeholder demo currently served in place of the commented-out TalkToMyDoc app above:
# a simple text-in / text-out greeting interface.
def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()