nroggendorff committed on
Commit
e8700aa
·
verified ·
1 Parent(s): 2d13202

Update train.py

Browse files
Files changed (1) hide show
  1. train.py +3 -3
train.py CHANGED
@@ -25,8 +25,8 @@ PUSH_TO_HUB = True
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
- pretrain = Dataset.from_generator(lambda: pretrain.take(int(2e+4)))
29
- instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(int(8e+3)))
30
  dataset_dict = DatasetDict({
31
  'pretrain': pretrain,
32
  'instruct': instruct
@@ -116,7 +116,7 @@ def train_model(model, tokenizer, dataset, push, isinst):
116
  gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
117
  fp16=FP16,
118
  max_grad_norm=CLIPPING,
119
- logging_steps=100
120
  )
121
 
122
  optimizer = AdamW(model.parameters(), lr=args.learning_rate)
 
25
 
26
  def load_data():
27
  pretrain = load_dataset(INPUT_DATASET, "cosmopedia-v2", split="train", streaming=True)
28
+ pretrain = Dataset.from_generator(lambda: pretrain.take(int(2e+5)))
29
+ instruct = load_dataset(INSTRUCT_DATASET, split="train").select(range(int(8e+4)))
30
  dataset_dict = DatasetDict({
31
  'pretrain': pretrain,
32
  'instruct': instruct
 
116
  gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
117
  fp16=FP16,
118
  max_grad_norm=CLIPPING,
119
+ logging_steps=10
120
  )
121
 
122
  optimizer = AdamW(model.parameters(), lr=args.learning_rate)