Jonathan Kittell committed on
Commit
8167236
·
1 Parent(s): d12226c

Add initial implementation of a question answering bot using Gradio and LangChain

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +38 -0
  3. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ venv
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from langchain_huggingface.llms import HuggingFacePipeline
from langchain.prompts import PromptTemplate

# Model configuration hoisted to named constants so it can be changed in one
# place instead of being buried inside the constructor call.
MODEL_ID = "ibm-granite/granite-3.2-2b-instruct"
MAX_NEW_TOKENS = 512

# Load the model locally and wrap it as a LangChain-compatible LLM.
# NOTE(review): with task="text-generation" the generated text typically
# echoes the prompt before the answer; consider passing
# pipeline_kwargs={"return_full_text": False} (or skip_prompt=True) —
# verify against the installed langchain_huggingface version.
llm = HuggingFacePipeline.from_model_id(
    model_id=MODEL_ID,
    task="text-generation",
    pipeline_kwargs={
        "max_new_tokens": MAX_NEW_TOKENS,
    },
)
13
+
14
# Prompt that simply forwards the user's question to the model; the single
# {question} placeholder is inferred automatically by from_template.
prompt = PromptTemplate.from_template(
    "Answer the following question: {question}"
)

# Compose prompt formatting and model invocation with LCEL pipe syntax.
chain = prompt | llm
21
+
22
def ask_question(question: str) -> str:
    """Run *question* through the prompt/LLM chain and return the model's text."""
    return chain.invoke({"question": question})
27
+ # Create a Gradio interface
28
# Minimal single-textbox web UI wired to the QA chain. The "text" shorthand
# maps to a default Textbox component for both input and output.
iface = gr.Interface(
    ask_question,
    "text",
    "text",
    title="Question Answering Bot",
    description="Ask any question and get an answer from the model.",
)

# Start the web server only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ langchain
3
+ langchain_huggingface