Spaces:
Runtime error
Runtime error
Commit
·
accfb07
1
Parent(s):
5812cd1
Happy Hugging Face!
Browse files — generator.py (+5 lines, −6 lines)
generator.py
CHANGED
|
@@ -50,10 +50,9 @@ def run_model(input_string, **generator_args):
|
|
| 50 |
output = hftokenizer.batch_decode(res, skip_special_tokens=True)
|
| 51 |
output = [item.split("<sep>") for item in output]
|
| 52 |
return output
|
| 53 |
-
|
| 54 |
al_tokenizer = att.from_pretrained("deepset/electra-base-squad2")
|
| 55 |
al_model = amqa.from_pretrained("deepset/electra-base-squad2")
|
| 56 |
-
|
| 57 |
# al_model = pickle.load(open('models/al_model.sav', 'rb'))
|
| 58 |
# al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
|
| 59 |
def QA(question, context):
|
|
@@ -62,11 +61,11 @@ def QA(question, context):
|
|
| 62 |
format = {
|
| 63 |
'question':question,
|
| 64 |
'context':context
|
| 65 |
-
|
| 66 |
res = nlp(format)
|
| 67 |
output = f"{question}\n{string.capwords(res['answer'])}\tscore : [{res['score']}] \n"
|
| 68 |
return output
|
| 69 |
-
|
| 70 |
# # Run the model, the deepset way
|
| 71 |
# with torch.no_grad():
|
| 72 |
# output = model(**inputs)
|
|
@@ -85,7 +84,7 @@ def QA(question, context):
|
|
| 85 |
|
| 86 |
def gen_question(inputs):
|
| 87 |
|
| 88 |
-
|
| 89 |
|
| 90 |
return questions
|
| 91 |
|
|
@@ -100,7 +99,7 @@ def read_file(filepath_name):
|
|
| 100 |
return context
|
| 101 |
|
| 102 |
def create_string_for_generator(context):
|
| 103 |
-
|
| 104 |
return (gen_list[0][0]).split('? ')
|
| 105 |
|
| 106 |
def creator(context):
|
|
|
|
| 50 |
output = hftokenizer.batch_decode(res, skip_special_tokens=True)
|
| 51 |
output = [item.split("<sep>") for item in output]
|
| 52 |
return output
|
|
|
|
| 53 |
# Load the extractive question-answering tokenizer and model once at module
# import time, so QA() can reuse them for every call.
# NOTE(review): `att` / `amqa` are presumably AutoTokenizer /
# AutoModelForQuestionAnswering aliases from the (unseen) import block —
# TODO confirm against the top of the file.
al_tokenizer = att.from_pretrained("deepset/electra-base-squad2")
al_model = amqa.from_pretrained("deepset/electra-base-squad2")
|
| 55 |
+
|
| 56 |
# al_model = pickle.load(open('models/al_model.sav', 'rb'))
|
| 57 |
# al_tokenizer = pickle.load(open('models/al_tokenizer.sav', 'rb'))
|
| 58 |
def QA(question, context):
|
|
|
|
| 61 |
format = {
|
| 62 |
'question':question,
|
| 63 |
'context':context
|
| 64 |
+
}
|
| 65 |
res = nlp(format)
|
| 66 |
output = f"{question}\n{string.capwords(res['answer'])}\tscore : [{res['score']}] \n"
|
| 67 |
return output
|
| 68 |
+
# inputs = tokenizer(question, context, return_tensors="pt")
|
| 69 |
# # Run the model, the deepset way
|
| 70 |
# with torch.no_grad():
|
| 71 |
# output = model(**inputs)
|
|
|
|
| 84 |
|
| 85 |
def gen_question(inputs):
    """Run the question-generation model over *inputs*.

    Thin wrapper around ``run_model``; returns its output unchanged
    (a list of ``<sep>``-split decoded strings).
    """
    return run_model(inputs)
|
| 90 |
|
|
|
|
| 99 |
return context
|
| 100 |
|
| 101 |
def create_string_for_generator(context):
    """Generate questions from *context* and split them apart.

    Takes the first decoded string of the first generation result and
    splits it on ``'? '``, yielding a list of question fragments.
    """
    generated = gen_question(context)
    first_decoded = generated[0][0]
    return first_decoded.split('? ')
|
| 104 |
|
| 105 |
def creator(context):
|