fix __main__ module issue on phoBERT.py
- __pycache__/app.cpython-310.pyc  +0 -0
- __pycache__/phoBERT.cpython-310.pyc  +0 -0
- phoBERT.py  +3 -1
__pycache__/app.cpython-310.pyc CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
__pycache__/phoBERT.cpython-310.pyc ADDED
Binary file (2.23 kB)
phoBERT.py CHANGED
@@ -3,6 +3,7 @@ from transformers import AutoModel, AutoTokenizer
 from underthesea import word_tokenize
 import __main__
 
+
 #phobert = AutoModel.from_pretrained("vinai/phobert-base")
 tokenizer = AutoTokenizer.from_pretrained("./")
 
@@ -31,8 +32,9 @@ class PhoBertModel(torch.nn.Module):
         output = torch.nn.Sigmoid()(output_2)
         return output
 
+setattr(__main__, "PhoBertModel", PhoBertModel)
+
 def getModel():
-    __main__.PhoBertModel = PhoBertModel
     model = torch.load('phoBertModel.pth', map_location=torch.device('cpu'))
     model.eval()
     return model
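
Why the registration matters (an assumption based on the diff, since the commit message does not spell it out): phoBertModel.pth appears to hold a fully pickled model object rather than a state_dict, and torch.load resolves that object's class by the module path recorded at save time. If the model was saved from a script executed directly, that path is __main__.PhoBertModel, so the name must exist on __main__ in the loading process (for example when phoBERT.py is imported by app.py), otherwise un-pickling fails with an AttributeError. A minimal sketch of the loading side under these assumptions:

    import __main__
    import torch

    class PhoBertModel(torch.nn.Module):
        # Stand-in for the real class defined in phoBERT.py (the one with the
        # Sigmoid head shown in the diff); its body is omitted here.
        ...

    # Make the class reachable as __main__.PhoBertModel before un-pickling;
    # setattr(...) has the same effect as `__main__.PhoBertModel = PhoBertModel`.
    setattr(__main__, "PhoBertModel", PhoBertModel)

    model = torch.load('phoBertModel.pth', map_location=torch.device('cpu'))
    model.eval()

Doing this at module scope (instead of inside getModel(), as before) sets the attribute as soon as phoBERT.py is imported, so any caller that later reaches torch.load, directly or through getModel(), finds the class already in place.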