Spaces:
Sleeping
Sleeping
Furkan Akkurt
committed on
Commit
·
cca1792
1
Parent(s):
2f15086
update app
Browse files
- app.py +4 -5
- bap_preprocessing.py +1 -5
app.py
CHANGED
|
@@ -1,11 +1,10 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import bap_preprocessing
|
|
|
|
| 3 |
|
| 4 |
-
def tokenize(
|
| 5 |
-
|
| 6 |
-
response
|
| 7 |
-
result = { "tokens": response }
|
| 8 |
-
return result
|
| 9 |
|
| 10 |
demo = gr.Interface(
|
| 11 |
fn=tokenize,
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import bap_preprocessing
|
| 3 |
+
import json
|
| 4 |
|
| 5 |
+
def tokenize(Sentence):
|
| 6 |
+
response = bap_preprocessing.tokenize(Sentence)
|
| 7 |
+
return response
|
|
|
|
|
|
|
| 8 |
|
| 9 |
demo = gr.Interface(
|
| 10 |
fn=tokenize,
|
bap_preprocessing.py
CHANGED
|
@@ -54,11 +54,9 @@ def prepare_sequence(seq, to_ix):
|
|
| 54 |
|
| 55 |
def prob_to_tag(out):
|
| 56 |
_sentence_tag_list = []
|
| 57 |
-
#for sentence in tag_scores
|
| 58 |
_prob_to_tag = []
|
| 59 |
for ch in out:
|
| 60 |
chlist = list(ch)
|
| 61 |
-
#print(chlist)
|
| 62 |
maxi = max(chlist)
|
| 63 |
ind = chlist.index(maxi)
|
| 64 |
_prob_to_tag.append((list(tag_to_ix.keys())[ind]))
|
|
@@ -99,11 +97,9 @@ def char_unifier(_token_list):
|
|
| 99 |
|
| 100 |
def tokenize(sentence):
|
| 101 |
input = prepare_sequence(sentence, char_to_ix)
|
| 102 |
-
out= model(input)
|
| 103 |
sentence_tag_list = prob_to_tag(out)
|
| 104 |
token_char_list = _char_to_token(sentence, sentence_tag_list)
|
| 105 |
token_list = char_unifier(token_char_list)
|
| 106 |
-
# print(token_list)
|
| 107 |
return token_list
|
| 108 |
|
| 109 |
-
print(tokenize("Merhaba, ben okula gidiyorum."))
|
|
|
|
| 54 |
|
| 55 |
def prob_to_tag(out):
|
| 56 |
_sentence_tag_list = []
|
|
|
|
| 57 |
_prob_to_tag = []
|
| 58 |
for ch in out:
|
| 59 |
chlist = list(ch)
|
|
|
|
| 60 |
maxi = max(chlist)
|
| 61 |
ind = chlist.index(maxi)
|
| 62 |
_prob_to_tag.append((list(tag_to_ix.keys())[ind]))
|
|
|
|
| 97 |
|
| 98 |
def tokenize(sentence):
|
| 99 |
input = prepare_sequence(sentence, char_to_ix)
|
| 100 |
+
out = model(input)
|
| 101 |
sentence_tag_list = prob_to_tag(out)
|
| 102 |
token_char_list = _char_to_token(sentence, sentence_tag_list)
|
| 103 |
token_list = char_unifier(token_char_list)
|
|
|
|
| 104 |
return token_list
|
| 105 |
|
|
|