AvocadoMuffin committed on
Commit b0b4ec9 · verified · 1 Parent(s): 0e823ae

Update train.py

Files changed (1):
  train.py +164 -329
train.py CHANGED
@@ -1,371 +1,206 @@
- #!/usr/bin/env python
- # train_cuad_lora_efficient.py - FIXED VERSION
- """
- CUAD fine-tune with LoRA - Fixed for realistic training times
- """
  
- import os, json, random, gc, time
- from collections import defaultdict
- from pathlib import Path
-
- import torch, numpy as np
- from datasets import load_dataset, Dataset, disable_caching
  from transformers import (
-     AutoTokenizer, AutoModelForQuestionAnswering,
-     TrainingArguments, default_data_collator, Trainer
  )
- from peft import LoraConfig, get_peft_model, TaskType
- import evaluate
  from huggingface_hub import login
  
- disable_caching()
-
- # Set tokenizers parallelism to avoid warnings
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
- # ─────────────────────────────────────────────────────────────── config ──
-
- MAX_LEN = 512 # Slightly longer context
- DOC_STRIDE = 256 # Larger stride = fewer chunks = faster training
- SEED = 42
- BATCH_SIZE = 1000 # Process in larger, more efficient batches
-
- # Back to reasonable subset size since you've trained 5k before
- USE_SUBSET = True
- SUBSET_SIZE = 7000 # Good middle ground - more than your 5k success
-
- def set_seed(seed):
-     random.seed(seed); np.random.seed(seed); torch.manual_seed(seed)
-     torch.cuda.manual_seed_all(seed)
-
- def balance_has_answer(dataset, ratio=2.0, max_samples=None):
-     """Keep all has-answer rows, down-sample no-answer rows to `ratio`."""
-     has, no = [], []
-     for ex in dataset:
-         (has if ex["answers"]["text"] else no).append(ex)
-
-     print(f"📊 Original: {len(has)} has-answer, {len(no)} no-answer")
-
-     # FIXED: Apply max_samples FIRST, then balance
-     if max_samples:
-         total_available = len(has) + len(no)
-         if total_available > max_samples:
-             # Sample proportionally from original distribution
-             has_ratio = len(has) / total_available
-             target_has = int(max_samples * has_ratio)
-             target_no = max_samples - target_has
-
-             has = random.sample(has, min(target_has, len(has)))
-             no = random.sample(no, min(target_no, len(no)))
-             print(f"📉 Pre-balance subset: {len(has)} has-answer, {len(no)} no-answer")
-
-     # Now balance within the subset
-     k = int(len(has) * ratio)
-     if len(no) > k:
-         no = random.sample(no, k)
-
-     balanced = has + no
-     random.shuffle(balanced) # Shuffle the final dataset
-
-     print(f"📊 Final balanced: {len([x for x in balanced if x['answers']['text']])} has-answer, {len([x for x in balanced if not x['answers']['text']])} no-answer")
-     print(f"📊 Total examples: {len(balanced)}")
-
-     return Dataset.from_list(balanced)
-
- # ────────────────────────────────────────────────────────────── postproc ──
-
- metric = evaluate.load("squad")
-
- def postprocess_qa(examples, features, raw_predictions, tokenizer):
-     """HF-style span extraction + n-best, returns SQuAD format dict."""
-     all_start, all_end = raw_predictions
-     example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
-     features_per_example = defaultdict(list)
-     for i, feat_id in enumerate(features["example_id"]):
-         features_per_example[example_id_to_index[feat_id]].append(i)
-
-     predictions = []
-
-     for example_idx, example in enumerate(examples):
-         best_score = -1e9
-         best_span = ""
-         context = example["context"]
-
-         for feat_idx in features_per_example[example_idx]:
-             start_logit = all_start[feat_idx]
-             end_logit = all_end[feat_idx]
-             offset = features["offset_mapping"][feat_idx]
-
-             start_idx = int(np.argmax(start_logit))
-             end_idx = int(np.argmax(end_logit))
-
-             if start_idx <= end_idx < len(offset):
-                 start_char, _ = offset[start_idx]
-                 _, end_char = offset[end_idx]
-                 span = context[start_char:end_char].strip()
-                 score = start_logit[start_idx] + end_logit[end_idx]
-                 if score > best_score and span:
-                     best_score, best_span = score, span
-
-         predictions.append(
-             {"id": example["id"], "prediction_text": best_span}
-         )
-     return predictions
-
- # ───────────────────────────────────────────────────────────── preprocessing ──
-
- def preprocess_training_batch(examples, tokenizer):
-     """Training preprocessing - NO offset_mapping included"""
-     questions = examples["question"]
-     contexts = examples["context"]
-
      tokenized_examples = tokenizer(
-         questions,
-         contexts,
          truncation="only_second",
-         max_length=MAX_LEN,
-         stride=DOC_STRIDE,
          return_overflowing_tokens=True,
          return_offsets_mapping=True,
          padding="max_length",
      )
-
      sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
      offset_mapping = tokenized_examples.pop("offset_mapping")
-
      start_positions = []
      end_positions = []
-
      for i, offsets in enumerate(offset_mapping):
-         cls_index = 0
          sample_index = sample_mapping[i]
          answers = examples["answers"][sample_index]
-
-         if not answers["text"] or not answers["text"][0]:
              start_positions.append(cls_index)
              end_positions.append(cls_index)
-             continue
-
-         answer_start_char = answers["answer_start"][0]
-         answer_text = answers["text"][0]
-         answer_end_char = answer_start_char + len(answer_text)
-
-         token_start_index = cls_index
-         token_end_index = cls_index
-
-         for token_index, (start_char, end_char) in enumerate(offsets):
-             if start_char <= answer_start_char < end_char:
-                 token_start_index = token_index
-             if start_char < answer_end_char <= end_char:
-                 token_end_index = token_index
-                 break
-
-         if token_start_index <= token_end_index and token_start_index > 0:
-             start_positions.append(token_start_index)
-             end_positions.append(token_end_index)
          else:
-             start_positions.append(cls_index)
-             end_positions.append(cls_index)
-
      tokenized_examples["start_positions"] = start_positions
      tokenized_examples["end_positions"] = end_positions
-
      return tokenized_examples
  
- def preprocess_validation_batch(examples, tokenizer):
-     """Validation preprocessing - INCLUDES offset_mapping and example_id"""
-     questions = examples["question"]
-     contexts = examples["context"]
-
-     tokenized_examples = tokenizer(
-         questions,
-         contexts,
-         truncation="only_second",
-         max_length=MAX_LEN,
-         stride=DOC_STRIDE,
-         return_overflowing_tokens=True,
-         return_offsets_mapping=True,
-         padding="max_length",
-     )
-
-     sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
-
-     tokenized_examples["example_id"] = [
-         examples["id"][sample_mapping[i]] for i in range(len(tokenized_examples["input_ids"]))
-     ]
-
-     return tokenized_examples
-
- def preprocess_dataset_streaming(dataset, tokenizer, desc="Processing", is_training=True):
-     """Process dataset in batches using HuggingFace's map function with batching."""
-     print(f"🔄 {desc} dataset with batch processing...")
-
-     if is_training:
-         preprocess_fn = preprocess_training_batch
-     else:
-         preprocess_fn = preprocess_validation_batch
-
-     processed = dataset.map(
-         lambda examples: preprocess_fn(examples, tokenizer),
-         batched=True,
-         batch_size=BATCH_SIZE,
-         remove_columns=dataset.column_names,
-         desc=desc,
-         num_proc=1,
-     )
-
-     print(f"✅ {desc} completed: {len(processed)} features")
-     return processed
  
- # ───────────────────────────────────────────────────────────────── main ──
  
  def main():
-     set_seed(SEED)
-
-     model_repo = os.getenv("MODEL_NAME", "AvocadoMuffin/roberta-cuad-qa-v4")
-
-     if (tokn := os.getenv("roberta_token")):
-         try:
-             login(tokn)
-             print("🔑 HuggingFace Hub login OK")
-         except Exception as e:
-             print(f"⚠️ Hub login failed: {e}")
-             tokn = None
-
-     print("📚 Loading CUAD…")
-     try:
-         cuad = load_dataset("theatticusproject/cuad-qa", split="train", trust_remote_code=True)
-         print(f"✅ Loaded {len(cuad)} examples")
-     except Exception as e:
-         print(f"❌ Dataset loading failed: {e}")
-         cuad = load_dataset("theatticusproject/cuad-qa", split="train", trust_remote_code=True, download_mode="force_redownload")
-
-     cuad = cuad.shuffle(seed=SEED)
-
-     # FIXED: Apply subset reduction more aggressively
-     subset_size = SUBSET_SIZE if USE_SUBSET else None
-     cuad = balance_has_answer(cuad, ratio=1.5, max_samples=subset_size) # Reduced ratio too
-     print(f"📊 Final dataset size: {len(cuad)} examples")
-
-     # Estimate features after preprocessing
-     avg_features_per_example = 2.5 # Conservative estimate with stride
-     estimated_features = len(cuad) * avg_features_per_example
-     print(f"📊 Estimated training features: ~{int(estimated_features)}")
-
-     ds = cuad.train_test_split(test_size=0.1, seed=SEED)
-     train_raw, val_raw = ds["train"], ds["test"]
-
-     # ── tokeniser & model ──
-     base_ckpt = "deepset/roberta-base-squad2"
-     tok = AutoTokenizer.from_pretrained(base_ckpt, use_fast=True)
-     model = AutoModelForQuestionAnswering.from_pretrained(base_ckpt)
-
-     # FIXED: Lighter LoRA config for faster training
-     lora = LoraConfig(
-         task_type=TaskType.QUESTION_ANS,
-         r=16, # Reduced from 32
-         lora_alpha=32, # Reduced from 64
          lora_dropout=0.1,
-         target_modules=["query", "value"], # Fewer modules
      )
-     model = get_peft_model(model, lora)
-     model.print_trainable_parameters()
-
-     # ── preprocessing ─────────────────────────────────────────
-     print("🔄 Starting preprocessing...")
-
-     train_feats = preprocess_dataset_streaming(train_raw, tok, "Training", is_training=True)
-     val_feats = preprocess_dataset_streaming(val_raw, tok, "Validation", is_training=False)
  
-     print(f"✅ Preprocessing completed!")
-     print(f" Training features: {len(train_feats)}")
-     print(f" Validation features: {len(val_feats)}")
  
-     # ── training args - FIXED for reasonable training time ──
-     batch_size = 16 # Good balance
-     gradient_accumulation_steps = 2
-     effective_batch_size = batch_size * gradient_accumulation_steps
-
-     num_epochs = 3 # Keep it reasonable
-     steps_per_epoch = len(train_feats) // effective_batch_size
-     total_steps = steps_per_epoch * num_epochs
-
-     eval_steps = max(25, steps_per_epoch // 8) # More frequent eval
-     save_steps = eval_steps * 3
-
-     print(f"📊 Training configuration:")
-     print(f" Effective batch size: {effective_batch_size}")
-     print(f" Steps per epoch: {steps_per_epoch}")
-     print(f" Total steps: {total_steps}")
-     print(f" Estimated time: ~{total_steps/2.4/60:.1f} minutes")
-     print(f" Eval every: {eval_steps} steps")
-
-     args = TrainingArguments(
-         output_dir="./cuad_lora_out",
-         learning_rate=3e-5, # Slightly lower LR
-         num_train_epochs=num_epochs,
-         per_device_train_batch_size=batch_size,
-         per_device_eval_batch_size=8,
-         gradient_accumulation_steps=gradient_accumulation_steps,
-         fp16=False, bf16=True,
-         eval_strategy="steps",
-         eval_steps=eval_steps,
-         save_steps=save_steps,
          save_total_limit=2,
-         weight_decay=0.01,
-         lr_scheduler_type="cosine",
-         warmup_ratio=0.1,
-         load_best_model_at_end=False,
-         logging_steps=10, # More frequent logging
-         report_to="none",
-         dataloader_num_workers=2,
-         dataloader_pin_memory=True,
-         remove_unused_columns=True,
      )
  
      trainer = Trainer(
          model=model,
-         args=args,
-         train_dataset=train_feats,
-         eval_dataset=val_feats,
-         tokenizer=tok,
          data_collator=default_data_collator,
-         compute_metrics=None,
      )
  
-     print("🚀 Training…")
-     try:
-         trainer.train()
-         print("✅ Training completed successfully!")
-     except Exception as e:
-         print(f"❌ Training failed: {e}")
-         try:
-             trainer.save_model("./cuad_lora_out_partial")
-             tok.save_pretrained("./cuad_lora_out_partial")
-             print("💾 Partial model saved")
-         except:
-             print("❌ Could not save partial model")
-         raise e
-
-     print("✅ Done. Best eval_loss:", trainer.state.best_metric)
-     trainer.save_model("./cuad_lora_out")
-     tok.save_pretrained("./cuad_lora_out")
-
-     # Push to hub
-     if tokn:
-         for attempt in range(3):
-             try:
-                 print(f"⬆️ Pushing to Hub (attempt {attempt + 1}/3)...")
-                 trainer.push_to_hub(model_repo, private=False)
-                 tok.push_to_hub(model_repo, private=False)
-                 print("🚀 Pushed to:", f"https://huggingface.co/{model_repo}")
-                 break
-             except Exception as e:
-                 print(f"⚠️ Hub push failed: {e}")
-                 if attempt < 2:
-                     time.sleep(30)
-         else:
-             print("💾 Model saved locally (push failed)")
  
  if __name__ == "__main__":
-     main()
+ import os
+ import collections
+ import string
+ import re
+ import numpy as np
  
+ from datasets import load_dataset, load_metric
  from transformers import (
+     DebertaTokenizerFast,
+     DebertaForQuestionAnswering,
+     Trainer,
+     TrainingArguments,
+     default_data_collator,
  )
+ from peft import LoraConfig, get_peft_model
  from huggingface_hub import login
  
+ # Load your HF token securely from environment variable
+ hf_token = os.environ.get("roberta_token")
+ if hf_token:
+     login(token=hf_token)
+ else:
+     print("Warning: HF token not found in environment variable 'roberta_token'. Push to hub may fail.")
+
+ metric = load_metric("squad")
+
+ def normalize_answer(s):
+     """Lower text and remove punctuation/articles/extra whitespace"""
+     def remove_articles(text):
+         return re.sub(r'\b(a|an|the)\b', ' ', text)
+     def white_space_fix(text):
+         return ' '.join(text.split())
+     def remove_punc(text):
+         exclude = set(string.punctuation)
+         return ''.join(ch for ch in text if ch not in exclude)
+     def lower(text):
+         return text.lower()
+     return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+ def prepare_train_features(examples, tokenizer, max_length=512, doc_stride=128):
      tokenized_examples = tokenizer(
+         examples["question"],
+         examples["context"],
          truncation="only_second",
+         max_length=max_length,
+         stride=doc_stride,
          return_overflowing_tokens=True,
          return_offsets_mapping=True,
          padding="max_length",
      )
      sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
      offset_mapping = tokenized_examples.pop("offset_mapping")
+
      start_positions = []
      end_positions = []
+
      for i, offsets in enumerate(offset_mapping):
+         input_ids = tokenized_examples["input_ids"][i]
+         cls_index = input_ids.index(tokenizer.cls_token_id)
          sample_index = sample_mapping[i]
          answers = examples["answers"][sample_index]
+
+         if len(answers["answer_start"]) == 0:
              start_positions.append(cls_index)
              end_positions.append(cls_index)
          else:
+             start_char = answers["answer_start"][0]
+             end_char = start_char + len(answers["text"][0])
+             sequence_ids = tokenized_examples.sequence_ids(i)
+             token_start_index = 0
+             while sequence_ids[token_start_index] != 1:
+                 token_start_index += 1
+             token_end_index = len(input_ids) - 1
+             while sequence_ids[token_end_index] != 1:
+                 token_end_index -= 1
+             if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
+                 start_positions.append(cls_index)
+                 end_positions.append(cls_index)
+             else:
+                 while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
+                     token_start_index += 1
+                 start_positions.append(token_start_index - 1)
+                 while offsets[token_end_index][1] >= end_char:
+                     token_end_index -= 1
+                 end_positions.append(token_end_index + 1)
+
      tokenized_examples["start_positions"] = start_positions
      tokenized_examples["end_positions"] = end_positions
      return tokenized_examples
  
+ def postprocess_qa_predictions(examples, features, raw_predictions, n_best_size=20, max_answer_length=30):
+     all_start_logits, all_end_logits = raw_predictions
+     example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
+     features_per_example = collections.defaultdict(list)
+     for i, feature in enumerate(features):
+         features_per_example[example_id_to_index[feature["example_id"]]].append(i)
+
+     predictions = collections.OrderedDict()
+     for example_index, example in enumerate(examples):
+         feature_indices = features_per_example[example_index]
+         min_null_score = None
+         valid_answers = []
+         context = example["context"]
+         for feature_index in feature_indices:
+             start_logits = all_start_logits[feature_index]
+             end_logits = all_end_logits[feature_index]
+             offsets = features[feature_index]["offset_mapping"]
+             cls_index = features[feature_index]["input_ids"].index(features[feature_index]["cls_token_id"])
+             feature_null_score = start_logits[cls_index] + end_logits[cls_index]
+             if min_null_score is None or min_null_score > feature_null_score:
+                 min_null_score = feature_null_score
+             start_indexes = np.argsort(start_logits)[-1: -n_best_size - 1: -1].tolist()
+             end_indexes = np.argsort(end_logits)[-1: -n_best_size - 1: -1].tolist()
+             for start_index in start_indexes:
+                 for end_index in end_indexes:
+                     if (
+                         start_index >= len(offsets)
+                         or end_index >= len(offsets)
+                         or offsets[start_index] is None
+                         or offsets[end_index] is None
+                     ):
+                         continue
+                     if end_index < start_index or end_index - start_index + 1 > max_answer_length:
+                         continue
+                     start_char = offsets[start_index][0]
+                     end_char = offsets[end_index][1]
+                     valid_answers.append(
+                         {"score": start_logits[start_index] + end_logits[end_index], "text": context[start_char:end_char]}
+                     )
+         best_answer = max(valid_answers, key=lambda x: x["score"]) if valid_answers else {"text": "", "score": 0.0}
+         predictions[example["id"]] = best_answer["text"]
+     return predictions
  
+ def compute_metrics(p, tokenizer, examples, features):
+     predictions = postprocess_qa_predictions(examples, features, p.predictions)
+     formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
+     references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
+     return metric.compute(predictions=formatted_predictions, references=references)
  
  def main():
+     model_name = "microsoft/deberta-xlarge"
+     output_dir = "./deberta-lora-cuad-finetuned"
+
+     datasets = load_dataset("theatticusproject/cuad-qa")
+     tokenizer = DebertaTokenizerFast.from_pretrained(model_name)
+     model = DebertaForQuestionAnswering.from_pretrained(model_name)
+
+     # LoRA config: tune rank and dropout as needed
+     lora_config = LoraConfig(
+         r=8,
+         lora_alpha=32,
+         target_modules=["query", "value"], # Adjust for DeBERTa internals as needed
          lora_dropout=0.1,
+         bias="none",
+         task_type="QUESTION_ANSWERING"
      )
+     model = get_peft_model(model, lora_config)
  
+     train_dataset = datasets["train"].map(
+         lambda examples: prepare_train_features(examples, tokenizer),
+         batched=True,
+         remove_columns=datasets["train"].column_names,
+     )
+     val_dataset = datasets["validation"].map(
+         lambda examples: prepare_train_features(examples, tokenizer),
+         batched=True,
+         remove_columns=datasets["validation"].column_names,
+     )
  
+     training_args = TrainingArguments(
+         output_dir=output_dir,
+         evaluation_strategy="steps",
+         eval_steps=500,
+         save_steps=500,
          save_total_limit=2,
+         learning_rate=3e-4, # LoRA usually supports higher LR
+         per_device_train_batch_size=1,
+         per_device_eval_batch_size=1,
+         num_train_epochs=3,
+         weight_decay=0.0,
+         logging_dir=f"{output_dir}/logs",
+         logging_steps=100,
+         load_best_model_at_end=True,
+         metric_for_best_model="eval_f1",
+         greater_is_better=True,
+         fp16=True,
+         push_to_hub=True,
+         hub_model_id="AvocadoMuffin/deberta_finetuned_qa_lora",
+         hub_strategy="checkpoint",
      )
  
      trainer = Trainer(
          model=model,
+         args=training_args,
+         train_dataset=train_dataset,
+         eval_dataset=val_dataset,
+         tokenizer=tokenizer,
          data_collator=default_data_collator,
+         compute_metrics=lambda p: compute_metrics(p, tokenizer, datasets["validation"], val_dataset),
      )
  
+     trainer.train()
+     trainer.push_to_hub()
  
  if __name__ == "__main__":
+     main()
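
Note on the new evaluation path: both splits are mapped with prepare_train_features, which pops offset_mapping and never stores example_id, while postprocess_qa_predictions reads feature["example_id"] and feature["offset_mapping"] (and a "cls_token_id" key the tokenized features do not carry; the usual pattern is input_ids.index(tokenizer.cls_token_id) with the tokenizer passed in). A minimal validation-side preprocessor in the style of the standard Hugging Face QA example is sketched below, assuming the same tokenizer arguments as prepare_train_features; prepare_validation_features is a hypothetical helper and is not part of this commit.

# Hypothetical helper (not in this commit): keeps offset_mapping and adds example_id
# so postprocess_qa_predictions can map overflow features back to their source examples.
def prepare_validation_features(examples, tokenizer, max_length=512, doc_stride=128):
    tokenized_examples = tokenizer(
        examples["question"],
        examples["context"],
        truncation="only_second",
        max_length=max_length,
        stride=doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Keep offsets only for context tokens so the span search ignores the question.
        sequence_ids = tokenized_examples.sequence_ids(i)
        tokenized_examples["offset_mapping"][i] = [
            (offset if sequence_ids[k] == 1 else None)
            for k, offset in enumerate(tokenized_examples["offset_mapping"][i])
        ]
        tokenized_examples["example_id"].append(examples["id"][sample_mapping[i]])
    return tokenized_examples

With such a helper, compute_metrics would receive features produced by it rather than the training-style features that currently back val_dataset.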
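
On the "Adjust for DeBERTa internals as needed" comment: DeBERTa v1 checkpoints such as microsoft/deberta-xlarge implement attention with a fused in_proj projection rather than separate modules named query and value (the v2/v3 models expose query_proj/key_proj/value_proj), so PEFT may not find the listed target_modules. A quick, purely illustrative check (not part of this commit) of which Linear module names actually exist in a given checkpoint before filling in LoraConfig(target_modules=...):

# Illustrative check (not part of this commit): print the distinct Linear submodule
# names so target_modules can be set to names present in this DeBERTa variant.
import torch.nn as nn
from transformers import DebertaForQuestionAnswering

model = DebertaForQuestionAnswering.from_pretrained("microsoft/deberta-xlarge")
linear_names = sorted({name.split(".")[-1] for name, module in model.named_modules()
                       if isinstance(module, nn.Linear)})
print(linear_names)  # pick matching names for LoraConfig(target_modules=[...])

Relatedly, PEFT's TaskType enum spells this task TaskType.QUESTION_ANS (as the removed script used), so the string "QUESTION_ANSWERING" may not map onto the QA-specific PEFT wrapper.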