dgtalbug committed
Commit 8211b00 · verified · 1 Parent(s): 3830f5a

Update train.py

Files changed (1)
  1. train.py +100 -42
train.py CHANGED
@@ -1,6 +1,6 @@
  # ==============================================
  # Stephen Model Fine-Tuning Script (LoRA + PEFT)
- # Optimized for JSONL dataset
+ # Clean + Debug-Enhanced for Grad & Deprecation Warnings
  # ==============================================

  !pip install -q "transformers>=4.44.0" "datasets" "peft>=0.12.0" accelerate bitsandbytes sentencepiece huggingface_hub
@@ -11,9 +11,10 @@ from huggingface_hub import login, whoami
  from datasets import load_dataset
  from transformers import (
  AutoTokenizer, AutoModelForCausalLM, TrainingArguments,
- Trainer, DataCollatorForLanguageModeling
+ Trainer, DataCollatorForLanguageModeling, BitsAndBytesConfig
  )
  from peft import LoraConfig, get_peft_model
+ from peft import prepare_model_for_kbit_training
  import torch

  # ==============================================
@@ -27,21 +28,17 @@ def log(msg):
  # ==============================================
  HF_TOKEN = os.getenv("HF_TOKEN")
  if not HF_TOKEN:
- raise ValueError(
- "❌ HF_TOKEN environment variable not set.\n"
- "Run in RunPod: %env HF_TOKEN='your_token_here'\n"
- "Or set in UI → Environment Variables."
- )
+ raise ValueError("❌ HF_TOKEN environment variable not set.")

  log("Logging into Hugging Face...")
  login(token=HF_TOKEN, add_to_git_credential=True)
  log(f"Logged in as: {whoami()['name']} ✅")

  # ==============================================
- # 2. Load JSONL Dataset from HF
+ # 2. Load Dataset
  # ==============================================
- dataset_name = "dgtalbug/stephen-dataset"
- data_file = "stephen.jsonl"
+ dataset_name = "dgtalbug/stephen-dataset" # CHANGE THIS
+ data_file = "stephen.jsonl" # CHANGE THIS

  log(f"Loading dataset: {dataset_name}/{data_file} ...")
  dataset = load_dataset(dataset_name, data_files=data_file, split="train")
@@ -51,7 +48,7 @@ log(f"First example: {dataset[0]}")
  # ==============================================
  # 3. Load Base Model & Tokenizer
  # ==============================================
- base_model = "dgtalbug/stable-code-instruct-3b"
+ base_model = "dgtalbug/stable-code-instruct-3b" # CHANGE THIS
  log(f"Loading base model: {base_model}...")

  tokenizer = AutoTokenizer.from_pretrained(
@@ -62,23 +59,31 @@ tokenizer = AutoTokenizer.from_pretrained(
  if tokenizer.pad_token is None:
  tokenizer.pad_token = tokenizer.eos_token

+ # ✅ Quantization config
+ bnb_config = BitsAndBytesConfig(
+ load_in_8bit=True,
+ llm_int8_threshold=6.0
+ )
+
  try:
  model = AutoModelForCausalLM.from_pretrained(
  base_model,
  token=HF_TOKEN,
- load_in_8bit=True,
  device_map="auto",
  torch_dtype=torch.float16,
- trust_remote_code=True
+ trust_remote_code=True,
+ return_dict=True,
+ quantization_config=bnb_config
  )
  except Exception as e:
- log(f"⚠️ 8-bit load failed: {e} — falling back to full precision.")
+ log(f"⚠️ Quantized load failed: {e} — falling back to fp16.")
  model = AutoModelForCausalLM.from_pretrained(
  base_model,
  token=HF_TOKEN,
  device_map="auto",
  torch_dtype=torch.float16,
- trust_remote_code=True
+ trust_remote_code=True,
+ return_dict=True
  )

  log("Base model loaded ✅")
@@ -86,42 +91,79 @@ log("Base model loaded ✅")
  # ==============================================
  # 4. LoRA Config
  # ==============================================
+ # log("Configuring LoRA...")
+ # lora_config = LoraConfig(
+ # r=16,
+ # lora_alpha=32,
+ # target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
+ # lora_dropout=0.05,
+ # bias="none",
+ # task_type="CAUSAL_LM"
+ # )
+ # model = get_peft_model(model, lora_config)
+
+ # # ✅ Ensure LoRA params require grad
+ # for name, param in model.named_parameters():
+ # if "lora" in name:
+ # param.requires_grad = True
+ # else:
+ # param.requires_grad = False
+
+ # # ✅ Sanity check: see how many params are trainable
+ # model.print_trainable_parameters()
+
+ # log("LoRA config applied ✅")
  log("Configuring LoRA...")
+
+ # First, prepare for 8-bit training (important for bitsandbytes)
+ model = prepare_model_for_kbit_training(model)
+
+ # LoRA config
  lora_config = LoraConfig(
  r=16,
  lora_alpha=32,
- target_modules=["q_proj", "v_proj", "k_proj", "o_proj"], # Safe for StableCode
+ target_modules=["q_proj", "v_proj", "k_proj", "o_proj"], # adjust if needed
  lora_dropout=0.05,
  bias="none",
  task_type="CAUSAL_LM"
  )
+
+ # Apply LoRA
  model = get_peft_model(model, lora_config)

- # Enable training for LoRA params
+ # Double-check trainable params
+ trainable_params = []
  for name, param in model.named_parameters():
- if "lora" in name:
- param.requires_grad = True
+ if param.requires_grad:
+ trainable_params.append(name)
+
+ if not trainable_params:
+ raise RuntimeError("❌ No parameters set to require gradients! LoRA not applied correctly.")

- log("LoRA config applied ")
+ log(f" Found {len(trainable_params)} trainable parameters.")
+ log(f"First 20 trainable params: {trainable_params[:20]}")

+ # Print PEFT/LoRA summary
+ model.print_trainable_parameters()
  # ==============================================
- # 5. Tokenize Dataset (JSONL)
+ # 5. Tokenize Dataset
  # ==============================================
  log("Tokenizing dataset...")

- # Detect correct text key
  first_row = dataset[0]
  if "text" in first_row:
  text_key = "text"
  elif "prompt" in first_row:
  text_key = "prompt"
  else:
- text_key = list(first_row.keys())[0] # fallback
+ text_key = list(first_row.keys())[0]

  log(f"Using text key: '{text_key}'")

  def tokenize_fn(example):
- return tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)
+ tokenized = tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)
+ tokenized["labels"] = tokenized["input_ids"].copy() # ✅ Ensure labels exist for grad
+ return tokenized

  tokenized_dataset = dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)
  log("Tokenization complete ✅")
@@ -137,35 +179,48 @@ data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
  # ==============================================
  output_dir = "./stephen-lora"
  log("Preparing training arguments...")
+
  training_args = TrainingArguments(
  output_dir=output_dir,
  overwrite_output_dir=True,
- per_device_train_batch_size=4,
- gradient_accumulation_steps=4,
+ per_device_train_batch_size=8,
+ gradient_accumulation_steps=2,
  gradient_checkpointing=True,
- warmup_steps=100,
- max_steps=1000,
- learning_rate=2e-4,
+ warmup_steps=50,
+ num_train_epochs=3,
+ max_steps=-1,
+ learning_rate=1e-4,
  lr_scheduler_type="cosine",
  fp16=True,
+ optim="adamw_torch",
  logging_dir="./logs",
- logging_steps=10,
- save_strategy="steps",
- save_steps=200,
+ logging_steps=20,
+ save_strategy="epoch",
  save_total_limit=2,
- report_to="none",
  push_to_hub=True,
- ddp_find_unused_parameters=False
+ hub_strategy="end",
+ ddp_find_unused_parameters=False,
+ label_names=["labels"]
  )
  log("Training arguments ready ✅")

  # ==============================================
- # 8. Custom Trainer
+ # 8. Debugging Helper Hooks
  # ==============================================
- class NoUnpackTrainer(Trainer):
- """Avoids unpacking bug in HF Trainer."""
- def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
- inputs = self._prepare_inputs(inputs)
+ def debug_batch(batch):
+ log(f"🔍 Debug batch keys: {list(batch.keys())}")
+ log(f"🔍 First input_ids: {batch['input_ids'][0][:10]}")
+ log(f"🔍 First labels: {batch['labels'][0][:10]}")
+ log(f"🔍 labels.requires_grad? {torch.tensor(batch['labels']).requires_grad}")
+
+ # ==============================================
+ # 9. Custom Trainer (safe + debug)
+ # ==============================================
+ class SafeTrainer(Trainer):
+ def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
+ # Debug batch content once
+ if self.state.global_step == 0:
+ debug_batch(inputs)
  if "labels" not in inputs:
  inputs["labels"] = inputs["input_ids"].clone()
  outputs = model(**inputs)
@@ -173,7 +228,7 @@ class NoUnpackTrainer(Trainer):
  return (loss, outputs) if return_outputs else loss

  log("Initializing Trainer...")
- trainer = NoUnpackTrainer(
+ trainer = SafeTrainer(
  model=model,
  args=training_args,
  train_dataset=tokenized_dataset,
@@ -182,9 +237,12 @@ trainer = NoUnpackTrainer(
  log("Trainer initialized ✅")

  # ==============================================
- # 9. Train & Push to Hub
+ # 10. Train & Push
  # ==============================================
- log("Starting training...")
+ trainable_params = [n for n, p in model.named_parameters() if p.requires_grad]
+ log(f"Trainable params count: {len(trainable_params)}")
+ log(f"First 20 trainable params: {trainable_params[:20]}")
+
  last_ckpt = None
  if os.path.isdir(output_dir):
  checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint-")]
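
The hunk ends inside the checkpoint-detection block; the remainder of train.py is unchanged by this commit and not shown here. For orientation, a minimal sketch of how such a resume-from-checkpoint block is typically completed with the Hugging Face Trainer API follows; this is an illustrative assumption, not code from the commit:

    if checkpoints:
        # assumed continuation: pick the checkpoint with the highest step number
        latest = max(checkpoints, key=lambda d: int(d.split("-")[-1]))
        last_ckpt = os.path.join(output_dir, latest)
        log(f"Resuming from checkpoint: {last_ckpt}")

    trainer.train(resume_from_checkpoint=last_ckpt)  # None starts training from step 0
    trainer.save_model(output_dir)                   # saves the LoRA adapter weights
    tokenizer.save_pretrained(output_dir)
    trainer.push_to_hub()                            # pairs with push_to_hub=True above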