File size: 1,200 Bytes
dcf6f0c
4bed6fd
46152dd
 
 
 
4bed6fd
b37eabc
0fdee06
 
 
 
 
dcf6f0c
 
46152dd
dcf6f0c
 
5d92ad1
 
dcf6f0c
 
 
34129b5
 
 
5d92ad1
f74ef39
 
34129b5
dcf6f0c
4bed6fd
46152dd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import datetime
import json
import streamlit as st
import tokenizers
import torch
from transformers import Pipeline, pipeline


def get_answer(input, context, engine):
    """Run the QA engine on a question/context pair and return the answer text.

    ``engine`` is any callable following the transformers question-answering
    pipeline contract: it takes a dict with ``question`` and ``context`` keys
    and returns a dict containing at least an ``answer`` key. Parameter names
    are kept as-is for existing callers (note: ``input`` shadows the builtin).
    """
    result = engine({"question": input, "context": context})
    return result["answer"]


@st.cache
def get_context():
    """Load the QA context from ``context.json`` and fill in its placeholders.

    Reads the ``info`` field of the JSON file and substitutes the year/age
    placeholder tokens based on the current calendar year. Result is cached
    by Streamlit for the session.

    NOTE(review): ages are computed from calendar years only (ignores month
    and day), matching the original behavior.
    """
    BIRTHYEAR = 1952
    OTHERBIRTHYEAR = 1984
    current_year = datetime.datetime.now().year

    # Placeholder -> replacement text; applied in insertion order,
    # identical to the original chained .replace() calls.
    substitutions = {
        "[YEAR]": str(current_year),
        "[BIRTHYEAR]": str(BIRTHYEAR),
        "[AGE]": str(current_year - BIRTHYEAR),
        "[OTHERAGE]": str(current_year - OTHERBIRTHYEAR),
    }

    with open("context.json") as f:
        context = json.load(f)["info"]

    for placeholder, value in substitutions.items():
        context = context.replace(placeholder, value)

    return context


@st.cache(
    hash_funcs={
        torch.nn.parameter.Parameter: lambda _: None,
        tokenizers.Tokenizer: lambda _: None,
        tokenizers.AddedToken: lambda _: None,
    },
    allow_output_mutation=True,
    show_spinner=False,
)
def load_engine() -> Pipeline:
    """Build and cache the Italian BERT question-answering pipeline.

    The ``hash_funcs`` mapping tells Streamlit's cache to skip hashing the
    unhashable model/tokenizer internals; ``allow_output_mutation`` lets the
    pipeline object be reused without re-hashing on every rerun.
    """
    # Same HuggingFace repo id serves as both model and tokenizer source.
    model_id = "mrm8488/bert-italian-finedtuned-squadv1-it-alfa"
    return pipeline("question-answering", model=model_id, tokenizer=model_id)