Update app.py
Browse files
app.py
CHANGED
@@ -15,7 +15,6 @@ sent = st.text_area('مدخل',default_value)
|
|
15 |
# Load the AraBART tokenizer used to feed the fill-mask pipeline.
# NOTE(review): max_length/padding/pad_to_max_length/truncation are call-time
# tokenization options passed at load time — confirm they actually take effect
# here; `pad_to_max_length` is deprecated in recent transformers releases.
tokenizer = AutoTokenizer.from_pretrained("moussaKam/AraBART", max_length=128, padding=True, pad_to_max_length = True, truncation=True)
|
16 |
# Fine-tuned AraBART masked-LM checkpoint providing the word suggestions.
model = AutoModelForMaskedLM.from_pretrained("Hamda/test-1-finetuned-AraBART")
|
17 |
# fill-mask pipeline; top_k=10 keeps the ten best candidates per masked slot.
pipe = pipeline("fill-mask", tokenizer=tokenizer, model=model, top_k=10)
|
18 |
-
#@st.cache
|
19 |
def next_word(text, pipe):
|
20 |
res_dict= {
|
21 |
'الكلمة المقترحة':[],
|
@@ -35,6 +34,7 @@ if (st.button('بحث', disabled=False)):
|
|
35 |
#using Graph
|
36 |
if (st.checkbox('الاستعانة بالرسم البياني', value=False)):
|
37 |
a = time()
|
|
|
38 |
VocMap = './voc.csv'
|
39 |
ScoreMap = './BM25.csv'
|
40 |
|
@@ -57,8 +57,13 @@ if (st.checkbox('الاستعانة بالرسم البياني', value=False)):
|
|
57 |
#@st.cache
|
58 |
def setQueriesVoc(df, id_list):
|
59 |
res = []
|
|
|
60 |
for e in id_list:
|
61 |
-
|
|
|
|
|
|
|
|
|
62 |
return list(set(res))
|
63 |
|
64 |
L = setQueriesVoc(df_in, id_list)
|
|
|
15 |
# Load the AraBART tokenizer used to feed the fill-mask pipeline.
# NOTE(review): max_length/padding/pad_to_max_length/truncation are call-time
# tokenization options passed at load time — confirm they actually take effect
# here; `pad_to_max_length` is deprecated in recent transformers releases.
tokenizer = AutoTokenizer.from_pretrained("moussaKam/AraBART", max_length=128, padding=True, pad_to_max_length = True, truncation=True)
|
16 |
# Fine-tuned AraBART masked-LM checkpoint providing the word suggestions.
model = AutoModelForMaskedLM.from_pretrained("Hamda/test-1-finetuned-AraBART")
|
17 |
# fill-mask pipeline; top_k=10 keeps the ten best candidates per masked slot.
pipe = pipeline("fill-mask", tokenizer=tokenizer, model=model, top_k=10)
|
|
|
18 |
def next_word(text, pipe):
|
19 |
res_dict= {
|
20 |
'الكلمة المقترحة':[],
|
|
|
34 |
#using Graph
|
35 |
if (st.checkbox('الاستعانة بالرسم البياني', value=False)):
|
36 |
a = time()
|
37 |
+
state_cb = True
|
38 |
VocMap = './voc.csv'
|
39 |
ScoreMap = './BM25.csv'
|
40 |
|
|
|
57 |
#@st.cache
|
58 |
#@st.cache
def setQueriesVoc(df, id_list):
    """Gather the vocabulary IDs linked to each requested token ID.

    Args:
        df: DataFrame (the './voc.csv' mapping) indexed by token ID, with an
            'ID2' column holding the linked vocabulary IDs.
        id_list: iterable of token IDs to look up in ``df``.

    Returns:
        De-duplicated list of every linked ID found across ``id_list``
        (order not guaranteed — it comes from a set).
    """
    res = []
    for e in id_list:
        try:
            # .values is already iterable; extend() needs no list() wrapper.
            res.extend(df.loc[e]['ID2'].values)
        except KeyError:
            # Token absent from the vocabulary map: report it in the UI and
            # keep going (a trailing `continue` here would be a no-op).
            st.write('Token Unfound')
    return list(set(res))
|
68 |
|
69 |
L = setQueriesVoc(df_in, id_list)
|