mia naomi committed
Commit eee1218 · 1 Parent(s): eb85a85
Update README.md
README.md CHANGED
@@ -58,11 +58,15 @@ GPT-J learns an inner representation of the English language that can be used to
 
 ```python
 # libraries and a wrapper around hivemind's quantization code
-!pip install transformers==4.14.1 bitsandbytes-cuda111==0.26.0 git+https://github.com/aicrumb/
-import
+!pip install transformers==4.14.1 bitsandbytes-cuda111==0.26.0 git+https://github.com/aicrumb/transformers-8bit -q
+import transformers_8bit
 
-model, tokenizer, config =
-
+model, tokenizer, config = transformers_8bit.load_gptj("crumb/gpt-j-6b-shakespeare", device='cuda')
+
+prompt = tokenizer("Romeo:", return_tensors='pt')
+prompt = {key: value.to('cuda') for key, value in prompt.items()}
+out = model.generate(**prompt, min_length=64, max_length=64, do_sample=True, pad_token_id=tokenizer.eos_token_id)
+print(tokenizer.decode(out[0]))
 
 """ example output
 Romeo: [Aside] And but in night, how tedious
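As an aside (not part of the commit above): a minimal usage sketch that reuses the `model` and `tokenizer` returned by `transformers_8bit.load_gptj` in the new example. The prompt text, random seed, and sampling settings (`max_length`, `temperature`, `top_p`) are illustrative assumptions, not taken from the repository; only standard `transformers` `generate()` arguments are used.

```python
import torch

# Assumes `model` and `tokenizer` were loaded as in the README example:
# model, tokenizer, config = transformers_8bit.load_gptj("crumb/gpt-j-6b-shakespeare", device='cuda')

torch.manual_seed(0)  # make the sampled continuation repeatable

# Any Shakespeare-style cue works as a prompt; "Juliet:" is an arbitrary choice
inputs = tokenizer("Juliet:", return_tensors='pt')
inputs = {key: value.to('cuda') for key, value in inputs.items()}

# Nucleus sampling with a mild temperature; all arguments are standard generate() options
out = model.generate(
    **inputs,
    max_length=128,
    do_sample=True,
    temperature=0.8,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```

Compared with the README's fixed-length call, this sketch simply allows a longer continuation and constrains sampling with `temperature`/`top_p`, which tends to keep the Shakespeare-style output more coherent while still varying between runs.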