Commit 44afbc6
Parent(s): 4efe60c

Update README.md

Signed-off-by: jinjieyuan <[email protected]>

README.md CHANGED
@@ -69,7 +69,7 @@ model.eval()
 non_zero_params = sum([(param.data != 0).sum().item() for _, param in model.named_parameters()])
 print(f"Number of all non-zero parameters: {non_zero_params}")
 
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained("IntelLabs/Llama-1-7B-sparsity50")
 
 instruction = "Edgar eats 18 pretzels a day. If his brother eats 1/2 as many, how many does his brother eat in a week?"
 prompt = generate_prompt(instruction)
@@ -84,8 +84,8 @@ with torch.no_grad():
     use_cache=True,
     num_beams=4,
 )
-
-
+s = generation_output.sequences[0]
+output = tokenizer.decode(s)
 print(output)
 
 ```
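For readability, here is a minimal sketch of how the README's inference snippet reads after this commit, assembled from the diff context above. It is not the repository's exact code: the model-loading call, the tokenization step, the `generate()` arguments other than `use_cache` and `num_beams`, and the `generate_prompt` helper are assumptions filled in for illustration; only the tokenizer line and the two decoding lines are what the commit actually changes.

```python
# Minimal sketch of the README snippet after this commit (assumptions noted inline).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "IntelLabs/Llama-1-7B-sparsity50"

model = AutoModelForCausalLM.from_pretrained(model_id)  # assumed loading call, not shown in the diff
model.eval()

# Count the parameters that survived pruning (unchanged context in the diff).
non_zero_params = sum((param.data != 0).sum().item() for _, param in model.named_parameters())
print(f"Number of all non-zero parameters: {non_zero_params}")

# The commit completes this previously truncated line with the checkpoint ID.
tokenizer = AutoTokenizer.from_pretrained(model_id)


def generate_prompt(instruction: str) -> str:
    # Hypothetical stand-in for the README's prompt-template helper,
    # which is defined earlier in the README and not part of this diff.
    return f"### Instruction:\n{instruction}\n\n### Response:\n"


instruction = "Edgar eats 18 pretzels a day. If his brother eats 1/2 as many, how many does his brother eat in a week?"
prompt = generate_prompt(instruction)

inputs = tokenizer(prompt, return_tensors="pt")  # assumed tokenization step
with torch.no_grad():
    generation_output = model.generate(
        input_ids=inputs["input_ids"],
        return_dict_in_generate=True,  # assumed: required for .sequences below
        max_new_tokens=128,            # assumed illustrative value
        use_cache=True,
        num_beams=4,
    )

# Added by the commit: decode the first beam so `output` exists before printing.
s = generation_output.sequences[0]
output = tokenizer.decode(s)
print(output)
```

Note that `generation_output.sequences` is only available when `generate()` returns a structured generation output (for example via `return_dict_in_generate=True`); the part of the README outside this diff presumably passes that flag, which is why the added decoding lines work.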