Commit c1c0b27
Parent(s): c20725b
Upload train_shakespeare_char.py
config/train_shakespeare_char.py ADDED
@@ -0,0 +1,37 @@
+# train a miniature character-level shakespeare model
+# good for debugging and playing on macbooks and such
+
+out_dir = '/content/drive/My Drive/S21/out-shakespeare-char'
+eval_interval = 250 # keep frequent because we'll overfit
+eval_iters = 200
+log_interval = 10 # don't print too too often
+
+# we expect to overfit on this small dataset, so only save when val improves
+always_save_checkpoint = True
+
+wandb_log = False # override via command line if you like
+wandb_project = 'shakespeare-char'
+wandb_run_name = 'mini-gpt'
+
+dataset = 'shakespeare_char'
+gradient_accumulation_steps = 1
+batch_size = 128
+block_size = 256 # context of up to 256 previous characters
+
+# baby GPT model :)
+n_layer = 6
+n_head = 6
+n_embd = 384
+dropout = 0.2
+
+learning_rate = 1e-3 # with baby networks can afford to go a bit higher
+max_iters = 50000
+lr_decay_iters = max_iters # make equal to max_iters usually
+min_lr = 1e-4 # learning_rate / 10 usually
+beta2 = 0.99 # make a bit bigger because number of tokens per iter is small
+
+warmup_iters = 100 # not super necessary potentially
+
+# on macbook also add
+# device = 'cpu' # run on cpu only
+# compile = False # do not torch compile the model
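
For context on the beta2 comment, here is a back-of-the-envelope check of how many tokens feed each optimizer step under these settings. This is an illustrative sketch using values copied from the config above; it is not part of the committed file.

# tokens processed per optimizer step, given the config values
gradient_accumulation_steps = 1
batch_size = 128
block_size = 256

tokens_per_iter = gradient_accumulation_steps * batch_size * block_size
print(f"tokens per iteration: {tokens_per_iter}")  # 1 * 128 * 256 = 32,768

With only ~33k tokens contributing to each gradient, the second-moment estimate is noisier than in large-batch runs, which is why the config nudges beta2 up to 0.99 so it averages over more steps. Assuming this config targets the upstream nanoGPT layout (as the field names suggest), it would typically be passed to the trainer as `python train.py config/train_shakespeare_char.py`, with the MacBook overrides from the trailing comments appended on the command line, e.g. `--device=cpu --compile=False`.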