Update app.py
app.py CHANGED

@@ -1,7 +1,6 @@
-import streamlit as st
 from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
 
-# Load the model with the
+# Load the model and tokenizer with the ignore_mismatched_sizes parameter
 model = AutoModelForSeq2SeqLM.from_pretrained(
     "danrdoran/flan-t5-grammar-correction-simplified-squad",
     ignore_mismatched_sizes=True
@@ -9,9 +8,15 @@ model = AutoModelForSeq2SeqLM.from_pretrained(
 tokenizer = AutoTokenizer.from_pretrained("danrdoran/flan-t5-grammar-correction-simplified-squad")
 
 # Set up the Hugging Face pipeline for text2text-generation task
-model_pipeline = pipeline(
+model_pipeline = pipeline(
+    "text2text-generation",
+    model=model,
+    tokenizer=tokenizer
+)
 
 # Streamlit app UI
+import streamlit as st
+
 st.title("AI English Tutor")
 st.write("Ask me a question or give me a sentence, and I will help you.")
 
@@ -27,18 +32,16 @@ student_question = st.text_input("Ask your question!")
 
 # Generate and display response using the Hugging Face model
 if student_question:
-    # Adjust prompt to ask for complete sentences
     prompt = f"Answer the following question in complete sentences: '{student_question}'"
-
+
     # Call the pipeline with adjusted parameters
     response = model_pipeline(
-        prompt,
-
-
-
-
-
-        do_sample=do_sample # Enable or disable sampling
+        prompt,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        do_sample=do_sample,
+        decoder_start_token_id=tokenizer.pad_token_id # Add the required decoder_start_token_id
     )
 
     st.write("Tutor's Answer:", response[0]['generated_text'])
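
Note: the final call references temperature, top_p, top_k, and do_sample, which are defined in lines the diff never shows (the elided lines around student_question = st.text_input(...)). A minimal sketch of what those controls presumably look like, assuming standard Streamlit widgets; the labels, ranges, and defaults below are assumptions, only the four variable names come from the diff:

import streamlit as st

# Hypothetical sampling controls; the diff only shows that these four
# variables exist, not how they are created.
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0)   # randomness of sampling
top_p = st.sidebar.slider("Top-p (nucleus)", 0.1, 1.0, 0.9)     # probability-mass cutoff
top_k = st.sidebar.slider("Top-k", 1, 100, 50)                  # candidate pool size
do_sample = st.sidebar.checkbox("Enable sampling", value=True)  # greedy decoding when False

student_question = st.text_input("Ask your question!")          # shown in the hunk header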
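On the decoder_start_token_id fix itself: T5-family models conventionally start decoding from the pad token (id 0), so passing tokenizer.pad_token_id matches the model family's convention. One way to check what this checkpoint actually declares (assuming Hub access):

from transformers import AutoConfig, AutoTokenizer

repo = "danrdoran/flan-t5-grammar-correction-simplified-squad"
config = AutoConfig.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)
# For T5 variants both are typically 0; if they differ, prefer the config value.
print(config.decoder_start_token_id, tokenizer.pad_token_id)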
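To exercise the completed pipeline outside Streamlit, a standalone sketch that mirrors the diff's final invocation, with literal values standing in for the widget variables (the question string is just an example):

from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer

repo = "danrdoran/flan-t5-grammar-correction-simplified-squad"
model = AutoModelForSeq2SeqLM.from_pretrained(repo, ignore_mismatched_sizes=True)
tokenizer = AutoTokenizer.from_pretrained(repo)
model_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

response = model_pipeline(
    "Answer the following question in complete sentences: 'What is a noun?'",
    temperature=1.0,
    top_p=0.9,
    top_k=50,
    do_sample=True,
    decoder_start_token_id=tokenizer.pad_token_id,
)
# text2text-generation returns a list of dicts with a 'generated_text' key,
# which is what the app reads as response[0]['generated_text']
print(response[0]["generated_text"])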