Upload infer.py with huggingface_hub
infer.py
ADDED
@@ -0,0 +1,31 @@
+import json
+import torch
+from model import BraLM, Vocab
+
+# Load the node vocabulary and rebuild the Vocab object from it.
+with open("./vocab.json") as f:
+    node_dict = json.load(f)
+vocab = Vocab.from_node_dict(node_dict)
+
+model = BraLM(hidden_size=32)
+model.prepare_network(vocab)
+
+# The checkpoint is sharded into two files; load both halves and merge them.
+state_dict_0 = torch.load("model_0.bin", weights_only=True)
+state_dict_1 = torch.load("model_1.bin", weights_only=True)
+merged_state_dict = {**state_dict_0, **state_dict_1}
+model.load_state_dict(merged_state_dict)
+model.to_device("cuda:0")
+
+# Prompt ("《Roma》 describes..."); cap the total output length at 16 characters.
+head = "《罗马》描述了"
+max_token = 16 - len(head)
+
+# Encode the prompt as directed character-pair edges ("a->b") and decode from them.
+start = [vocab(head[i] + '->' + head[i + 1]) for i in range(len(head) - 1)]
+ret = model.decode(start, vocab, max_token)
+decode_tuple_list = [vocab.decode(p) for p in ret]
+# Stitch the sentence back together: the first pair's first character, then each pair's last.
+decode_sentence = decode_tuple_list[0][0] + "".join([p[-1] for p in decode_tuple_list])
+
+print(decode_sentence)
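To try infer.py locally, the sibling files it expects (model.py, vocab.json, model_0.bin, model_1.bin) can be fetched from the same repository with huggingface_hub; a minimal sketch, with the repo id as a placeholder:

from huggingface_hub import snapshot_download

# Placeholder repo id; substitute the actual model repository this commit belongs to.
snapshot_download(repo_id="<namespace>/<repo>", local_dir=".")

After the download, `python infer.py` runs the decoding example above; note the script assumes a CUDA device via model.to_device("cuda:0").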