dgtalbug committed · verified
Commit 3830f5a · 1 Parent(s): 393ef36

Update train.py

Files changed (1):
  1. train.py (+31, -24)
train.py CHANGED
@@ -1,9 +1,9 @@
  # ==============================================
- # Stephen Model Fine-Tuning Script (LoRA + PEFT) — RunPod Ready
- # Updated: Aug 2025
+ # Stephen Model Fine-Tuning Script (LoRA + PEFT)
+ # Optimized for JSONL dataset
  # ==============================================

- # !pip install -q "transformers>=4.44.0" "datasets" "peft>=0.12.0" accelerate bitsandbytes sentencepiece huggingface_hub
+ !pip install -q "transformers>=4.44.0" "datasets" "peft>=0.12.0" accelerate bitsandbytes sentencepiece huggingface_hub

  import os
  from datetime import datetime
@@ -38,13 +38,15 @@ login(token=HF_TOKEN, add_to_git_credential=True)
  log(f"Logged in as: {whoami()['name']} ✅")

  # ==============================================
- # 2. Load Dataset
+ # 2. Load JSONL Dataset from HF
  # ==============================================
  dataset_name = "dgtalbug/stephen-dataset"
- log(f"Loading dataset: {dataset_name}...")
- dataset = load_dataset(dataset_name)
- log(f"Dataset loaded. Splits: {list(dataset.keys())}")
- log(f"First example: {dataset['train'][0]}")
+ data_file = "stephen.jsonl"
+
+ log(f"Loading dataset: {dataset_name}/{data_file} ...")
+ dataset = load_dataset(dataset_name, data_files=data_file, split="train")
+ log(f"Dataset loaded — {len(dataset)} rows")
+ log(f"First example: {dataset[0]}")

  # ==============================================
  # 3. Load Base Model & Tokenizer
@@ -88,14 +90,14 @@ log("Configuring LoRA...")
  lora_config = LoraConfig(
      r=16,
      lora_alpha=32,
-     target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # StableCode safe
+     target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # Safe for StableCode
      lora_dropout=0.05,
      bias="none",
      task_type="CAUSAL_LM"
  )
  model = get_peft_model(model, lora_config)

- # ✅ Fix requires_grad warning
+ # ✅ Enable training for LoRA params
  for name, param in model.named_parameters():
      if "lora" in name:
          param.requires_grad = True
@@ -103,21 +105,27 @@ for name, param in model.named_parameters():
  log("LoRA config applied ✅")

  # ==============================================
- # 5. Tokenize Dataset
+ # 5. Tokenize Dataset (JSONL)
  # ==============================================
  log("Tokenizing dataset...")

+ # Detect correct text key
+ first_row = dataset[0]
+ if "text" in first_row:
+     text_key = "text"
+ elif "prompt" in first_row:
+     text_key = "prompt"
+ else:
+     text_key = list(first_row.keys())[0]  # fallback
+
+ log(f"Using text key: '{text_key}'")
+
  def tokenize_fn(example):
-     text = f"{example.get('character', '')}: {example.get('line', '')}".strip()
-     return tokenizer(text, truncation=True, padding="max_length", max_length=512)
+     return tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)

- tokenized_datasets = dataset.map(
-     tokenize_fn,
-     batched=True,
-     remove_columns=dataset["train"].column_names
- )
+ tokenized_dataset = dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)
  log("Tokenization complete ✅")
- log(f"Tokenized sample: {tokenized_datasets['train'][0]}")
+ log(f"Tokenized sample: {tokenized_dataset[0]}")

  # ==============================================
  # 6. Data Collator
@@ -152,13 +160,12 @@ training_args = TrainingArguments(
  log("Training arguments ready ✅")

  # ==============================================
- # 8. Custom Trainer — Prevent ValueError
+ # 8. Custom Trainer
  # ==============================================
  class NoUnpackTrainer(Trainer):
-     """Custom Trainer that avoids Hugging Face's unpacking bug."""
+     """Avoids unpacking bug in HF Trainer."""
      def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
          inputs = self._prepare_inputs(inputs)
-         # Ensure labels exist for LM
          if "labels" not in inputs:
              inputs["labels"] = inputs["input_ids"].clone()
          outputs = model(**inputs)
@@ -169,13 +176,13 @@ log("Initializing Trainer...")
  trainer = NoUnpackTrainer(
      model=model,
      args=training_args,
-     train_dataset=tokenized_datasets["train"],
+     train_dataset=tokenized_dataset,
      data_collator=data_collator
  )
  log("Trainer initialized ✅")

  # ==============================================
- # 9. Train & Push to Hub (Checkpoint Safe)
+ # 9. Train & Push to Hub
  # ==============================================
  log("Starting training...")
  last_ckpt = None
 
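A note on the dataset change in section 2: passing data_files together with split="train" makes load_dataset return a flat Dataset rather than a DatasetDict, which is why the updated script indexes dataset[0] directly instead of dataset['train'][0]. A minimal sketch of that loading step; the print checks are added here purely for illustration and are not part of train.py:

from datasets import load_dataset

dataset_name = "dgtalbug/stephen-dataset"
data_file = "stephen.jsonl"

# With data_files + split="train", load_dataset returns a single Dataset
# (no split dict), so rows are indexed directly.
dataset = load_dataset(dataset_name, data_files=data_file, split="train")

print(len(dataset))          # number of JSONL rows
print(dataset.column_names)  # keys seen by the text-key detection in section 5
print(dataset[0])            # first example, as logged by train.py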
 
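For section 4, a quick way to confirm that only the LoRA adapter weights remain trainable after get_peft_model is PEFT's print_trainable_parameters(). The sketch below uses "gpt2" as a stand-in base model, since the real checkpoint is configured in section 3, which this commit does not touch; its target module is therefore "c_attn" rather than the q/k/v/o projections listed in the diff:

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("gpt2")  # stand-in, not the script's base model
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["c_attn"],  # GPT-2's fused attention projection (illustrative)
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(base, lora_config)

# Prints trainable vs. total parameter counts; only the adapter weights should train.
model.print_trainable_parameters()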
 
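On the tokenization rewrite in section 5: because dataset.map is called with batched=True, example[text_key] inside tokenize_fn is a list of strings, which the tokenizer accepts directly, so the one-line function works per batch. A self-contained sketch with two made-up rows standing in for stephen.jsonl and a GPT-2 tokenizer standing in for the script's tokenizer:

from datasets import Dataset
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
tokenizer.pad_token = tokenizer.eos_token          # GPT-2 has no pad token by default

dataset = Dataset.from_list([{"text": "Stephen: hello"}, {"text": "Stephen: goodbye"}])
text_key = "text"  # what the detection logic would pick for these rows

def tokenize_fn(example):
    # With batched=True, example[text_key] is a list of strings; the tokenizer
    # handles lists natively, so the same function covers the whole batch.
    return tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)

tokenized_dataset = dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)
print(tokenized_dataset[0].keys())  # input_ids, attention_mask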
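Section 6 (the data collator) appears only as unchanged context in this diff. For a causal-LM setup like this one, a common choice, assumed here rather than shown by the commit, is DataCollatorForLanguageModeling with mlm=False, which builds labels from input_ids and masks padding with -100; with such a collator the labels fallback in compute_loss becomes a no-op:

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # stand-in tokenizer
tokenizer.pad_token = tokenizer.eos_token

# mlm=False => causal-LM collation: labels are copied from input_ids,
# with padding positions set to -100 so they are ignored by the loss.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)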
 
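The section 8 hunk ends inside compute_loss, right after outputs = model(**inputs). A sketch of how an override with this signature typically finishes, returning outputs.loss and honoring return_outputs; the actual closing lines live outside this diff:

from transformers import Trainer

class NoUnpackTrainer(Trainer):
    """Avoids unpacking bug in HF Trainer."""

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        inputs = self._prepare_inputs(inputs)
        # Causal-LM fallback: reuse input_ids as labels if the collator added none.
        if "labels" not in inputs:
            inputs["labels"] = inputs["input_ids"].clone()
        outputs = model(**inputs)
        loss = outputs.loss
        return (loss, outputs) if return_outputs else loss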
 
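Section 9 is cut off after last_ckpt = None, and the old heading described it as checkpoint safe. One common pattern for that, sketched under the assumption that trainer and training_args exist as configured earlier in train.py; the commit's actual resume-and-push code is outside this hunk:

import os
from transformers.trainer_utils import get_last_checkpoint

last_ckpt = None
if os.path.isdir(training_args.output_dir):
    # Returns the newest checkpoint-* directory, or None if there is none yet.
    last_ckpt = get_last_checkpoint(training_args.output_dir)

trainer.train(resume_from_checkpoint=last_ckpt)

# Push the fine-tuned weights to the Hub; relies on the huggingface_hub login
# performed at the top of the script.
trainer.push_to_hub()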