GindaChen committed on
Commit 3b24157 · verified · 1 Parent(s): b54f411

Upload folder using huggingface_hub

attnserver.run_attnserver.slurm.sh.343188.out.log CHANGED
@@ -120316,3 +120316,244 @@ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120316
  batch tensor after cp: position_ids torch.Size([1, 16384])
120317
  Start exporting trace 1
120318
  Done exporting trace 1
120319
+ [2025-06-21 20:56:05] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 147698.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
120320
+ batch tensor: tokens torch.Size([1, 131072])
120321
+ batch tensor: labels torch.Size([1, 131072])
120322
+ batch tensor: loss_mask torch.Size([1, 131072])
120323
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120324
+ batch tensor: position_ids torch.Size([1, 131072])
120325
+ batch tensor after cp: tokens torch.Size([1, 16384])
120326
+ batch tensor after cp: labels torch.Size([1, 16384])
120327
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120328
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120329
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120330
+ batch tensor: tokens torch.Size([1, 131072])
120331
+ batch tensor: labels torch.Size([1, 131072])
120332
+ batch tensor: loss_mask torch.Size([1, 131072])
120333
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120334
+ batch tensor: position_ids torch.Size([1, 131072])
120335
+ batch tensor after cp: tokens torch.Size([1, 16384])
120336
+ batch tensor after cp: labels torch.Size([1, 16384])
120337
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120338
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120339
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120340
+ batch tensor: tokens torch.Size([1, 131072])
120341
+ batch tensor: labels torch.Size([1, 131072])
120342
+ batch tensor: loss_mask torch.Size([1, 131072])
120343
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120344
+ batch tensor: position_ids torch.Size([1, 131072])
120345
+ batch tensor after cp: tokens torch.Size([1, 16384])
120346
+ batch tensor after cp: labels torch.Size([1, 16384])
120347
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120348
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120349
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120350
+ batch tensor: tokens torch.Size([1, 131072])
120351
+ batch tensor: labels torch.Size([1, 131072])
120352
+ batch tensor: loss_mask torch.Size([1, 131072])
120353
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120354
+ batch tensor: position_ids torch.Size([1, 131072])
120355
+ batch tensor after cp: tokens torch.Size([1, 16384])
120356
+ batch tensor after cp: labels torch.Size([1, 16384])
120357
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120358
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120359
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120360
+ batch tensor: tokens torch.Size([1, 131072])
120361
+ batch tensor: labels torch.Size([1, 131072])
120362
+ batch tensor: loss_mask torch.Size([1, 131072])
120363
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120364
+ batch tensor: position_ids torch.Size([1, 131072])
120365
+ batch tensor after cp: tokens torch.Size([1, 16384])
120366
+ batch tensor after cp: labels torch.Size([1, 16384])
120367
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120368
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120369
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120370
+ batch tensor: tokens torch.Size([1, 131072])
120371
+ batch tensor: labels torch.Size([1, 131072])
120372
+ batch tensor: loss_mask torch.Size([1, 131072])
120373
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120374
+ batch tensor: position_ids torch.Size([1, 131072])
120375
+ batch tensor after cp: tokens torch.Size([1, 16384])
120376
+ batch tensor after cp: labels torch.Size([1, 16384])
120377
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120378
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120379
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120380
+ batch tensor: tokens torch.Size([1, 131072])
120381
+ batch tensor: labels torch.Size([1, 131072])
120382
+ batch tensor: loss_mask torch.Size([1, 131072])
120383
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120384
+ batch tensor: position_ids torch.Size([1, 131072])
120385
+ batch tensor after cp: tokens torch.Size([1, 16384])
120386
+ batch tensor after cp: labels torch.Size([1, 16384])
120387
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120388
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120389
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120390
+ batch tensor: tokens torch.Size([1, 131072])
120391
+ batch tensor: labels torch.Size([1, 131072])
120392
+ batch tensor: loss_mask torch.Size([1, 131072])
120393
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120394
+ batch tensor: position_ids torch.Size([1, 131072])
120395
+ batch tensor after cp: tokens torch.Size([1, 16384])
120396
+ batch tensor after cp: labels torch.Size([1, 16384])
120397
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120398
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120399
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120400
+ batch tensor: tokens torch.Size([1, 131072])
120401
+ batch tensor: labels torch.Size([1, 131072])
120402
+ batch tensor: loss_mask torch.Size([1, 131072])
120403
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120404
+ batch tensor: position_ids torch.Size([1, 131072])
120405
+ batch tensor after cp: tokens torch.Size([1, 16384])
120406
+ batch tensor after cp: labels torch.Size([1, 16384])
120407
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120408
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120409
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120410
+ batch tensor: tokens torch.Size([1, 131072])
120411
+ batch tensor: labels torch.Size([1, 131072])
120412
+ batch tensor: loss_mask torch.Size([1, 131072])
120413
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120414
+ batch tensor: position_ids torch.Size([1, 131072])
120415
+ batch tensor after cp: tokens torch.Size([1, 16384])
120416
+ batch tensor after cp: labels torch.Size([1, 16384])
120417
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120418
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120419
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120420
+ batch tensor: tokens torch.Size([1, 131072])
120421
+ batch tensor: labels torch.Size([1, 131072])
120422
+ batch tensor: loss_mask torch.Size([1, 131072])
120423
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120424
+ batch tensor: position_ids torch.Size([1, 131072])
120425
+ batch tensor after cp: tokens torch.Size([1, 16384])
120426
+ batch tensor after cp: labels torch.Size([1, 16384])
120427
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120428
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120429
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120430
+ batch tensor: tokens torch.Size([1, 131072])
120431
+ batch tensor: labels torch.Size([1, 131072])
120432
+ batch tensor: loss_mask torch.Size([1, 131072])
120433
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120434
+ batch tensor: position_ids torch.Size([1, 131072])
120435
+ batch tensor after cp: tokens torch.Size([1, 16384])
120436
+ batch tensor after cp: labels torch.Size([1, 16384])
120437
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120438
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120439
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120440
+ batch tensor: tokens torch.Size([1, 131072])
120441
+ batch tensor: labels torch.Size([1, 131072])
120442
+ batch tensor: loss_mask torch.Size([1, 131072])
120443
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120444
+ batch tensor: position_ids torch.Size([1, 131072])
120445
+ batch tensor after cp: tokens torch.Size([1, 16384])
120446
+ batch tensor after cp: labels torch.Size([1, 16384])
120447
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120448
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120449
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120450
+ batch tensor: tokens torch.Size([1, 131072])
120451
+ batch tensor: labels torch.Size([1, 131072])
120452
+ batch tensor: loss_mask torch.Size([1, 131072])
120453
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120454
+ batch tensor: position_ids torch.Size([1, 131072])
120455
+ batch tensor after cp: tokens torch.Size([1, 16384])
120456
+ batch tensor after cp: labels torch.Size([1, 16384])
120457
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120458
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120459
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120460
+ batch tensor: tokens torch.Size([1, 131072])
120461
+ batch tensor: labels torch.Size([1, 131072])
120462
+ batch tensor: loss_mask torch.Size([1, 131072])
120463
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120464
+ batch tensor: position_ids torch.Size([1, 131072])
120465
+ batch tensor after cp: tokens torch.Size([1, 16384])
120466
+ batch tensor after cp: labels torch.Size([1, 16384])
120467
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120468
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120469
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120470
+ batch tensor: tokens torch.Size([1, 131072])
120471
+ batch tensor: labels torch.Size([1, 131072])
120472
+ batch tensor: loss_mask torch.Size([1, 131072])
120473
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120474
+ batch tensor: position_ids torch.Size([1, 131072])
120475
+ batch tensor after cp: tokens torch.Size([1, 16384])
120476
+ batch tensor after cp: labels torch.Size([1, 16384])
120477
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120478
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120479
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120480
+ batch tensor: tokens torch.Size([1, 131072])
120481
+ batch tensor: labels torch.Size([1, 131072])
120482
+ batch tensor: loss_mask torch.Size([1, 131072])
120483
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120484
+ batch tensor: position_ids torch.Size([1, 131072])
120485
+ batch tensor after cp: tokens torch.Size([1, 16384])
120486
+ batch tensor after cp: labels torch.Size([1, 16384])
120487
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120488
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120489
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120490
+ batch tensor: tokens torch.Size([1, 131072])
120491
+ batch tensor: labels torch.Size([1, 131072])
120492
+ batch tensor: loss_mask torch.Size([1, 131072])
120493
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120494
+ batch tensor: position_ids torch.Size([1, 131072])
120495
+ batch tensor after cp: tokens torch.Size([1, 16384])
120496
+ batch tensor after cp: labels torch.Size([1, 16384])
120497
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120498
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120499
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120500
+ batch tensor: tokens torch.Size([1, 131072])
120501
+ batch tensor: labels torch.Size([1, 131072])
120502
+ batch tensor: loss_mask torch.Size([1, 131072])
120503
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120504
+ batch tensor: position_ids torch.Size([1, 131072])
120505
+ batch tensor after cp: tokens torch.Size([1, 16384])
120506
+ batch tensor after cp: labels torch.Size([1, 16384])
120507
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120508
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120509
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120510
+ batch tensor: tokens torch.Size([1, 131072])
120511
+ batch tensor: labels torch.Size([1, 131072])
120512
+ batch tensor: loss_mask torch.Size([1, 131072])
120513
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120514
+ batch tensor: position_ids torch.Size([1, 131072])
120515
+ batch tensor after cp: tokens torch.Size([1, 16384])
120516
+ batch tensor after cp: labels torch.Size([1, 16384])
120517
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120518
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120519
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120520
+ batch tensor: tokens torch.Size([1, 131072])
120521
+ batch tensor: labels torch.Size([1, 131072])
120522
+ batch tensor: loss_mask torch.Size([1, 131072])
120523
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120524
+ batch tensor: position_ids torch.Size([1, 131072])
120525
+ batch tensor after cp: tokens torch.Size([1, 16384])
120526
+ batch tensor after cp: labels torch.Size([1, 16384])
120527
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120528
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120529
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120530
+ batch tensor: tokens torch.Size([1, 131072])
120531
+ batch tensor: labels torch.Size([1, 131072])
120532
+ batch tensor: loss_mask torch.Size([1, 131072])
120533
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120534
+ batch tensor: position_ids torch.Size([1, 131072])
120535
+ batch tensor after cp: tokens torch.Size([1, 16384])
120536
+ batch tensor after cp: labels torch.Size([1, 16384])
120537
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120538
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120539
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120540
+ batch tensor: tokens torch.Size([1, 131072])
120541
+ batch tensor: labels torch.Size([1, 131072])
120542
+ batch tensor: loss_mask torch.Size([1, 131072])
120543
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120544
+ batch tensor: position_ids torch.Size([1, 131072])
120545
+ batch tensor after cp: tokens torch.Size([1, 16384])
120546
+ batch tensor after cp: labels torch.Size([1, 16384])
120547
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120548
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120549
+ batch tensor after cp: position_ids torch.Size([1, 16384])
120550
+ batch tensor: tokens torch.Size([1, 131072])
120551
+ batch tensor: labels torch.Size([1, 131072])
120552
+ batch tensor: loss_mask torch.Size([1, 131072])
120553
+ batch tensor: attention_mask torch.Size([1, 1, 131072, 131072])
120554
+ batch tensor: position_ids torch.Size([1, 131072])
120555
+ batch tensor after cp: tokens torch.Size([1, 16384])
120556
+ batch tensor after cp: labels torch.Size([1, 16384])
120557
+ batch tensor after cp: loss_mask torch.Size([1, 16384])
120558
+ batch tensor after cp: attention_mask torch.Size([1, 1, 16384, 131072])
120559
+ batch tensor after cp: position_ids torch.Size([1, 16384])
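
Note on the repeated "batch tensor" / "batch tensor after cp" pairs above: with context-parallel size 8 (see the argument dump in the 343194 log below), every per-sample tensor of sequence length 131072 is reduced to 131072 / 8 = 16384 per rank, while the attention mask keeps its full 131072 key dimension ([1, 1, 16384, 131072]). The sketch below only illustrates those shapes with a naive contiguous split; it is not Megatron's implementation, whose helper additionally reorders chunks to load-balance causal attention across CP ranks. A small seq_len keeps the example runnable (the logged 131072 x 131072 mask would not fit in memory).

```python
import torch

def shard_for_cp(batch, cp_size, cp_rank, seq_dims):
    """Slice each tensor along its sequence dimension(s) for one CP rank (illustrative only)."""
    out = {}
    for name, t in batch.items():
        for dim in seq_dims[name]:
            chunk = t.size(dim) // cp_size      # e.g. 131072 // 8 = 16384 in the log above
            t = t.narrow(dim, cp_rank * chunk, chunk)
        out[name] = t
    return out

seq, cp = 1024, 8   # the job logged seq length 131072 and cp size 8
batch = {
    "tokens": torch.zeros(1, seq, dtype=torch.long),
    "attention_mask": torch.zeros(1, 1, seq, seq, dtype=torch.bool),
}
# tokens/labels/loss_mask/position_ids are split on dim 1; the mask is split only on
# its query dimension (dim 2), which is why the log shows [1, 1, 16384, 131072].
sharded = shard_for_cp(batch, cp_size=cp, cp_rank=0,
                       seq_dims={"tokens": (1,), "attention_mask": (2,)})
print(sharded["tokens"].shape)          # torch.Size([1, 128])
print(sharded["attention_mask"].shape)  # torch.Size([1, 1, 128, 1024])
```
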
attnserver.run_attnserver.slurm.sh.343194.err.log CHANGED
The diff for this file is too large to render. See raw diff
 
attnserver.run_attnserver.slurm.sh.343194.out.log CHANGED
@@ -15308,3 +15308,1536 @@ CHECKPOINT_PATH: gpt-checkpoint
15308
  PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
15309
  --------------------------------
15310
  /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
15311
+ INFO:megatron.training.initialize:Setting logging level to 0
15312
+ INFO:megatron.training.initialize:Setting logging level to 0
15313
+ using world size: 64, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
15314
+ Number of virtual stages per pipeline stage: None
15315
+ WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
15316
+ using torch.float16 for parameters ...
15317
+ ------------------------ arguments ------------------------
15318
+ account_for_embedding_in_pipeline_split ......... False
15319
+ account_for_loss_in_pipeline_split .............. False
15320
+ accumulate_allreduce_grads_in_fp32 .............. False
15321
+ adam_beta1 ...................................... 0.9
15322
+ adam_beta2 ...................................... 0.999
15323
+ adam_eps ........................................ 1e-08
15324
+ add_bias_linear ................................. True
15325
+ add_position_embedding .......................... True
15326
+ add_qkv_bias .................................... True
15327
+ adlr_autoresume ................................. False
15328
+ adlr_autoresume_interval ........................ 1000
15329
+ align_grad_reduce ............................... True
15330
+ align_param_gather .............................. False
15331
+ app_tag_run_name ................................ None
15332
+ app_tag_run_version ............................. 0.0.0
15333
+ apply_layernorm_1p .............................. False
15334
+ apply_query_key_layer_scaling ................... False
15335
+ apply_residual_connection_post_layernorm ........ False
15336
+ apply_rope_fusion ............................... False
15337
+ async_save ...................................... None
15338
+ async_tensor_model_parallel_allreduce ........... True
15339
+ attention_backend ............................... AttnBackend.auto
15340
+ attention_dropout ............................... 0.1
15341
+ attention_softmax_in_fp32 ....................... False
15342
+ auto_detect_ckpt_format ......................... False
15343
+ barrier_with_L1_time ............................ True
15344
+ bert_binary_head ................................ True
15345
+ bert_embedder_type .............................. megatron
15346
+ bert_load ....................................... None
15347
+ bf16 ............................................ False
15348
+ bias_dropout_fusion ............................. True
15349
+ bias_gelu_fusion ................................ True
15350
+ bias_swiglu_fusion .............................. True
15351
+ biencoder_projection_dim ........................ 0
15352
+ biencoder_shared_query_context_model ............ False
15353
+ block_data_path ................................. None
15354
+ calc_ft_timeouts ................................ False
15355
+ calculate_per_token_loss ........................ False
15356
+ check_for_large_grads ........................... False
15357
+ check_for_nan_in_loss_and_grad .................. False
15358
+ check_for_spiky_loss ............................ False
15359
+ check_weight_hash_across_dp_replicas_interval ... None
15360
+ ckpt_assume_constant_structure .................. False
15361
+ ckpt_convert_format ............................. None
15362
+ ckpt_convert_save ............................... None
15363
+ ckpt_convert_update_legacy_dist_opt_format ...... False
15364
+ ckpt_format ..................................... torch_dist
15365
+ ckpt_fully_parallel_load ........................ False
15366
+ ckpt_fully_parallel_save ........................ True
15367
+ ckpt_fully_parallel_save_deprecated ............. False
15368
+ ckpt_step ....................................... None
15369
+ classes_fraction ................................ 1.0
15370
+ clip_grad ....................................... 1.0
15371
+ INFO:megatron.training.initialize:Setting logging level to 0
15372
+ clone_scatter_output_in_embedding ............... True
15373
+ config_logger_dir ...............................
15374
+ consumed_train_samples .......................... 0
15375
+ consumed_valid_samples .......................... 0
15376
+ context_parallel_size ........................... 8
15377
+ cp_comm_type .................................... ['p2p']
15378
+ create_attention_mask_in_dataloader ............. True
15379
+ cross_entropy_fusion_impl ....................... native
15380
+ cross_entropy_loss_fusion ....................... False
15381
+ cuda_graph_scope ................................ full
15382
+ cuda_graph_warmup_steps ......................... 3
15383
+ data_args_path .................................. None
15384
+ data_cache_path ................................. None
15385
+ data_parallel_random_init ....................... False
15386
+ data_parallel_sharding_strategy ................. no_shard
15387
+ data_parallel_size .............................. 1
15388
+ data_path ....................................... None
15389
+ data_per_class_fraction ......................... 1.0
15390
+ data_sharding ................................... True
15391
+ dataloader_type ................................. single
15392
+ ddp_average_in_collective ....................... False
15393
+ ddp_bucket_size ................................. None
15394
+ ddp_num_buckets ................................. None
15395
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
15396
+ decoder_first_pipeline_num_layers ............... None
15397
+ decoder_last_pipeline_num_layers ................ None
15398
+ decoder_num_layers .............................. None
15399
+ decoder_seq_length .............................. None
15400
+ decoupled_lr .................................... None
15401
+ decoupled_min_lr ................................ None
15402
+ decrease_batch_size_if_needed ................... False
15403
+ defer_embedding_wgrad_compute ................... False
15404
+ deprecated_use_mcore_models ..................... False
15405
+ deterministic_mode .............................. False
15406
+ dino_bottleneck_size ............................ 256
15407
+ dino_freeze_last_layer .......................... 1
15408
+ dino_head_hidden_size ........................... 2048
15409
+ dino_local_crops_number ......................... 10
15410
+ dino_local_img_size ............................. 96
15411
+ dino_norm_last_layer ............................ False
15412
+ dino_teacher_temp ............................... 0.07
15413
+ dino_warmup_teacher_temp ........................ 0.04
15414
+ dino_warmup_teacher_temp_epochs ................. 30
15415
+ disable_bf16_reduced_precision_matmul ........... False
15416
+ INFO:megatron.training.initialize:Setting logging level to 0
15417
+ disable_mamba_mem_eff_path ...................... False
15418
+ disable_straggler_on_startup .................... False
15419
+ dist_ckpt_format_deprecated ..................... None
15420
+ dist_ckpt_strictness ............................ assume_ok_unexpected
15421
+ distribute_saved_activations .................... False
15422
+ distributed_backend ............................. nccl
15423
+ distributed_timeout_minutes ..................... 10
15424
+ embedding_path .................................. None
15425
+ empty_unused_memory_level ....................... 0
15426
+ enable_cuda_graph ............................... False
15427
+ enable_ft_package ............................... False
15428
+ enable_gloo_process_groups ...................... True
15429
+ enable_msc ...................................... True
15430
+ enable_one_logger ............................... True
15431
+ encoder_num_layers .............................. 2
15432
+ encoder_pipeline_model_parallel_size ............ 0
15433
+ encoder_seq_length .............................. 16384
15434
+ encoder_tensor_model_parallel_size .............. 0
15435
+ end_weight_decay ................................ 0.1
15436
+ eod_mask_loss ................................... False
15437
+ error_injection_rate ............................ 0
15438
+ error_injection_type ............................ transient_error
15439
+ eval_interval ................................... 16
15440
+ eval_iters ...................................... 1
15441
+ evidence_data_path .............................. None
15442
+ exit_duration_in_mins ........................... None
15443
+ exit_interval ................................... None
15444
+ exit_on_missing_checkpoint ...................... False
15445
+ exit_signal_handler ............................. False
15446
+ exp_avg_dtype ................................... torch.float32
15447
+ exp_avg_sq_dtype ................................ torch.float32
15448
+ expert_model_parallel_size ...................... 1
15449
+ expert_tensor_parallel_size ..................... 8
15450
+ external_cuda_graph ............................. False
15451
+ ffn_hidden_size ................................. 16384
15452
+ finetune ........................................ False
15453
+ first_last_layers_bf16 .......................... False
15454
+ flash_decode .................................... False
15455
+ fp16 ............................................ True
15456
+ fp16_lm_cross_entropy ........................... False
15457
+ fp32_residual_connection ........................ False
15458
+ fp8 ............................................. None
15459
+ fp8_amax_compute_algo ........................... most_recent
15460
+ fp8_amax_history_len ............................ 1
15461
+ fp8_interval .................................... 1
15462
+ fp8_margin ...................................... 0
15463
+ fp8_param_gather ................................ False
15464
+ fp8_recipe ...................................... delayed
15465
+ fp8_wgrad ....................................... True
15466
+ fsdp_double_buffer .............................. False
15467
+ global_batch_size ............................... 1
15468
+ grad_reduce_in_bf16 ............................. False
15469
+ gradient_accumulation_fusion .................... True
15470
+ gradient_reduce_div_fusion ...................... True
15471
+ group_query_attention ........................... True
15472
+ head_lr_mult .................................... 1.0
15473
+ heterogeneous_layers_config_encoded_json ........ None
15474
+ heterogeneous_layers_config_path ................ None
15475
+ hidden_dropout .................................. 0.1
15476
+ hidden_size ..................................... 4096
15477
+ hierarchical_context_parallel_sizes ............. None
15478
+ high_priority_stream_groups ..................... []
15479
+ hybrid_attention_ratio .......................... 0.0
15480
+ hybrid_mlp_ratio ................................ 0.0
15481
+ hybrid_override_pattern ......................... None
15482
+ hysteresis ...................................... 2
15483
+ ict_head_size ................................... None
15484
+ ict_load ........................................ None
15485
+ img_h ........................................... 224
15486
+ img_w ........................................... 224
15487
+ indexer_batch_size .............................. 128
15488
+ indexer_log_interval ............................ 1000
15489
+ inference_batch_times_seqlen_threshold .......... -1
15490
+ inference_dynamic_batching ...................... False
15491
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
15492
+ inference_dynamic_batching_buffer_overflow_factor None
15493
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
15494
+ inference_dynamic_batching_chunk_size ........... 256
15495
+ inference_dynamic_batching_max_requests_override None
15496
+ inference_dynamic_batching_max_tokens_override .. None
15497
+ inference_max_batch_size ........................ 8
15498
+ inference_max_seq_length ........................ 2560
15499
+ inference_rng_tracker ........................... False
15500
+ init_method_std ................................. 0.02
15501
+ init_method_xavier_uniform ...................... False
15502
+ init_model_with_meta_device ..................... False
15503
+ initial_loss_scale .............................. 4294967296
15504
+ inprocess_active_world_size ..................... 64
15505
+ inprocess_barrier_timeout ....................... 120
15506
+ inprocess_completion_timeout .................... 120
15507
+ inprocess_empty_cuda_cache ...................... False
15508
+ inprocess_granularity ........................... node
15509
+ inprocess_hard_timeout .......................... 90
15510
+ inprocess_heartbeat_interval .................... 30
15511
+ inprocess_heartbeat_timeout ..................... 60
15512
+ INFO:megatron.training.initialize:Setting logging level to 0
15513
+ inprocess_last_call_wait ........................ 1
15514
+ inprocess_max_iterations ........................ None
15515
+ inprocess_monitor_process_interval .............. 1.0
15516
+ inprocess_monitor_thread_interval ............... 1.0
15517
+ inprocess_progress_watchdog_interval ............ 1.0
15518
+ inprocess_restart ............................... False
15519
+ inprocess_soft_timeout .......................... 60
15520
+ inprocess_termination_grace_time ................ 1
15521
+ is_hybrid_model ................................. False
15522
+ INFO:megatron.training.initialize:Setting logging level to 0
15523
+ iter_per_epoch .................................. 1250
15524
+ iterations_to_skip .............................. []
15525
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
15526
+ kv_channels ..................................... 64
15527
+ kv_lora_rank .................................... 32
15528
+ lazy_mpu_init ................................... None
15529
+ load ............................................ gpt-checkpoint
15530
+ load_model_opt_format ........................... False
15531
+ local_rank ...................................... 0
15532
+ log_interval .................................... 1
15533
+ log_loss_scale_to_tensorboard ................... True
15534
+ log_memory_to_tensorboard ....................... False
15535
+ log_num_zeros_in_grad ........................... False
15536
+ log_params_norm ................................. False
15537
+ log_progress .................................... False
15538
+ log_straggler ................................... False
15539
+ log_throughput .................................. False
15540
+ log_timers_to_tensorboard ....................... False
15541
+ log_validation_ppl_to_tensorboard ............... False
15542
+ log_world_size_to_tensorboard ................... False
15543
+ logging_level ................................... 0
15544
+ loss_scale ...................................... None
15545
+ loss_scale_window ............................... 1000
15546
+ lr .............................................. 0.0005
15547
+ lr_decay_iters .................................. 150000
15548
+ lr_decay_samples ................................ None
15549
+ lr_decay_style .................................. cosine
15550
+ lr_warmup_fraction .............................. None
15551
+ lr_warmup_init .................................. 0.0
15552
+ lr_warmup_iters ................................. 2
15553
+ lr_warmup_samples ............................... 0
15554
+ lr_wsd_decay_iters .............................. None
15555
+ lr_wsd_decay_samples ............................ None
15556
+ lr_wsd_decay_style .............................. exponential
15557
+ main_grads_dtype ................................ torch.float32
15558
+ main_params_dtype ............................... torch.float32
15559
+ make_vocab_size_divisible_by .................... 128
15560
+ mamba_head_dim .................................. 64
15561
+ mamba_num_groups ................................ 8
15562
+ mamba_num_heads ................................. None
15563
+ mamba_state_dim ................................. 128
15564
+ manual_gc ....................................... False
15565
+ manual_gc_eval .................................. True
15566
+ manual_gc_interval .............................. 0
15567
+ mask_factor ..................................... 1.0
15568
+ mask_prob ....................................... 0.15
15569
+ mask_type ....................................... random
15570
+ masked_softmax_fusion ........................... True
15571
+ max_position_embeddings ......................... 16384
15572
+ max_tokens_to_oom ............................... 12000
15573
+ memory_snapshot_path ............................ snapshot.pickle
15574
+ merge_file ...................................... merges.txt
15575
+ micro_batch_size ................................ 1
15576
+ INFO:megatron.training.initialize:Setting logging level to 0
15577
+ microbatch_group_size_per_vp_stage .............. None
15578
+ mid_level_dataset_surplus ....................... 0.005
15579
+ min_loss_scale .................................. 1.0
15580
+ min_lr .......................................... 0.0
15581
+ mlp_chunks_for_prefill .......................... 1
15582
+ mmap_bin_files .................................. True
15583
+ mock_data ....................................... True
15584
+ moe_apply_probs_on_input ........................ False
15585
+ moe_aux_loss_coeff .............................. 0.0
15586
+ moe_enable_deepep ............................... False
15587
+ moe_expert_capacity_factor ...................... None
15588
+ moe_extended_tp ................................. False
15589
+ moe_ffn_hidden_size ............................. None
15590
+ moe_grouped_gemm ................................ False
15591
+ moe_input_jitter_eps ............................ None
15592
+ moe_layer_freq .................................. 1
15593
+ moe_layer_recompute ............................. False
15594
+ moe_pad_expert_input_to_capacity ................ False
15595
+ moe_per_layer_logging ........................... False
15596
+ moe_permute_fusion .............................. False
15597
+ moe_router_bias_update_rate ..................... 0.001
15598
+ moe_router_dtype ................................ None
15599
+ moe_router_enable_expert_bias ................... False
15600
+ moe_router_force_load_balancing ................. False
15601
+ moe_router_group_topk ........................... None
15602
+ moe_router_load_balancing_type .................. aux_loss
15603
+ moe_router_num_groups ........................... None
15604
+ moe_router_padding_for_fp8 ...................... False
15605
+ moe_router_pre_softmax .......................... False
15606
+ moe_router_score_function ....................... softmax
15607
+ moe_router_topk ................................. 2
15608
+ moe_router_topk_scaling_factor .................. None
15609
+ moe_shared_expert_intermediate_size ............. None
15610
+ moe_shared_expert_overlap ....................... False
15611
+ moe_token_dispatcher_type ....................... allgather
15612
+ moe_token_drop_policy ........................... probs
15613
+ moe_use_legacy_grouped_gemm ..................... False
15614
+ INFO:megatron.training.initialize:Setting logging level to 0
15615
+ moe_use_upcycling ............................... False
15616
+ moe_z_loss_coeff ................................ None
15617
+ mrope_section ................................... None
15618
+ mscale .......................................... 1.0
15619
+ mscale_all_dim .................................. 1.0
15620
+ mtp_loss_scaling_factor ......................... 0.1
15621
+ mtp_num_layers .................................. None
15622
+ multi_latent_attention .......................... False
15623
+ nccl_all_reduce_for_prefill ..................... False
15624
+ nccl_communicator_config_path ................... None
15625
+ nccl_ub ......................................... False
15626
+ no_load_optim ................................... None
15627
+ no_load_rng ..................................... None
15628
+ no_persist_layer_norm ........................... False
15629
+ no_rope_freq .................................... None
15630
+ no_save_optim ................................... None
15631
+ no_save_rng ..................................... None
15632
+ non_persistent_ckpt_type ........................ None
15633
+ non_persistent_global_ckpt_dir .................. None
15634
+ non_persistent_local_ckpt_algo .................. fully_parallel
15635
+ non_persistent_local_ckpt_dir ................... None
15636
+ non_persistent_save_interval .................... None
15637
+ norm_epsilon .................................... 1e-05
15638
+ normalization ................................... LayerNorm
15639
+ num_attention_heads ............................. 64
15640
+ num_channels .................................... 3
15641
+ num_classes ..................................... 1000
15642
+ num_dataset_builder_threads ..................... 1
15643
+ num_distributed_optimizer_instances ............. 1
15644
+ num_experts ..................................... None
15645
+ num_layers ...................................... 2
15646
+ INFO:megatron.training.initialize:Setting logging level to 0
15647
+ num_layers_at_end_in_bf16 ....................... 1
15648
+ num_layers_at_start_in_bf16 ..................... 1
15649
+ num_layers_per_virtual_pipeline_stage ........... None
15650
+ num_query_groups ................................ 16
15651
+ num_virtual_stages_per_pipeline_rank ............ None
15652
+ num_workers ..................................... 2
15653
+ object_storage_cache_path ....................... None
15654
+ one_logger_async ................................ False
15655
+ one_logger_project .............................. megatron-lm
15656
+ one_logger_run_name ............................. None
15657
+ onnx_safe ....................................... None
15658
+ openai_gelu ..................................... False
15659
+ optimizer ....................................... adam
15660
+ optimizer_cpu_offload ........................... False
15661
+ optimizer_offload_fraction ...................... 1.0
15662
+ output_bert_embeddings .......................... False
15663
+ overlap_cpu_optimizer_d2h_h2d ................... False
15664
+ overlap_grad_reduce ............................. False
15665
+ overlap_p2p_comm ................................ False
15666
+ overlap_p2p_comm_warmup_flush ................... False
15667
+ overlap_param_gather ............................ False
15668
+ overlap_param_gather_with_optimizer_step ........ False
15669
+ override_opt_param_scheduler .................... False
15670
+ params_dtype .................................... torch.float16
15671
+ patch_dim ....................................... 16
15672
+ per_split_data_args_path ........................ None
15673
+ perform_initialization .......................... True
15674
+ pin_cpu_grads ................................... True
15675
+ pin_cpu_params .................................. True
15676
+ pipeline_model_parallel_comm_backend ............ None
15677
+ pipeline_model_parallel_size .................... 1
15678
+ pipeline_model_parallel_split_rank .............. None
15679
+ position_embedding_type ......................... learned_absolute
15680
+ pretrained_checkpoint ........................... None
15681
+ profile ......................................... False
15682
+ profile_ranks ................................... [0]
15683
+ profile_step_end ................................ 12
15684
+ profile_step_start .............................. 10
15685
+ q_lora_rank ..................................... None
15686
+ qk_head_dim ..................................... 128
15687
+ qk_l2_norm ...................................... False
15688
+ qk_layernorm .................................... False
15689
+ qk_pos_emb_head_dim ............................. 64
15690
+ query_in_block_prob ............................. 0.1
15691
+ rampup_batch_size ............................... None
15692
+ rank ............................................ 0
15693
+ recompute_granularity ........................... None
15694
+ recompute_method ................................ None
15695
+ recompute_modules ............................... None
15696
+ recompute_num_layers ............................ None
15697
+ record_memory_history ........................... False
15698
+ relative_attention_max_distance ................. 128
15699
+ relative_attention_num_buckets .................. 32
15700
+ replication ..................................... False
15701
+ replication_factor .............................. 2
15702
+ replication_jump ................................ None
15703
+ rerun_mode ...................................... disabled
15704
+ reset_attention_mask ............................ False
15705
+ reset_position_ids .............................. False
15706
+ result_rejected_tracker_filename ................ None
15707
+ retriever_report_topk_accuracies ................ []
15708
+ retriever_score_scaling ......................... False
15709
+ retriever_seq_length ............................ 256
15710
+ retro_add_retriever ............................. False
15711
+ retro_attention_gate ............................ 1
15712
+ retro_cyclic_train_iters ........................ None
15713
+ retro_encoder_attention_dropout ................. 0.1
15714
+ retro_encoder_hidden_dropout .................... 0.1
15715
+ retro_encoder_layers ............................ 2
15716
+ retro_num_neighbors ............................. 2
15717
+ retro_num_retrieved_chunks ...................... 2
15718
+ retro_project_dir ............................... None
15719
+ retro_verify_neighbor_count ..................... True
15720
+ rope_scaling_factor ............................. 8.0
15721
+ rotary_base ..................................... 10000
15722
+ rotary_interleaved .............................. False
15723
+ rotary_percent .................................. 1.0
15724
+ rotary_scaling_factor ........................... 1.0
15725
+ rotary_seq_len_interpolation_factor ............. None
15726
+ run_workload_inspector_server ................... False
15727
+ sample_rate ..................................... 1.0
15728
+ save ............................................ gpt-checkpoint
15729
+ save_interval ................................... 16
15730
+ scatter_gather_tensors_in_pipeline .............. True
15731
+ seed ............................................ 1234
15732
+ seq_length ...................................... 16384
15733
+ sequence_parallel ............................... False
15734
+ sgd_momentum .................................... 0.9
15735
+ short_seq_prob .................................. 0.1
15736
+ skip_train ...................................... False
15737
+ skipped_train_samples ........................... 0
15738
+ spec ............................................ None
15739
+ split ........................................... None
15740
+ squared_relu .................................... False
15741
+ start_weight_decay .............................. 0.1
15742
+ straggler_ctrlr_port ............................ 65535
15743
+ straggler_minmax_count .......................... 1
15744
+ suggested_communication_unit_size ............... None
15745
+ swiglu .......................................... False
15746
+ swin_backbone_type .............................. tiny
15747
+ symmetric_ar_type ............................... None
15748
+ te_rng_tracker .................................. False
15749
+ tensor_model_parallel_size ...................... 8
15750
+ tensorboard_dir ................................. tensorboard-logs/
15751
+ tensorboard_log_interval ........................ 1
15752
+ tensorboard_queue_size .......................... 1000
15753
+ test_data_path .................................. None
15754
+ test_mode ....................................... False
15755
+ tiktoken_num_special_tokens ..................... 1000
15756
+ tiktoken_pattern ................................ None
15757
+ tiktoken_special_tokens ......................... None
15758
+ timing_log_level ................................ 0
15759
+ timing_log_option ............................... minmax
15760
+ titles_data_path ................................ None
15761
+ tokenizer_model ................................. None
15762
+ tokenizer_type .................................. GPT2BPETokenizer
15763
+ torch_fsdp2_reshard_after_forward ............... True
15764
+ tp_comm_bootstrap_backend ....................... nccl
15765
+ tp_comm_bulk_dgrad .............................. True
15766
+ tp_comm_bulk_wgrad .............................. True
15767
+ tp_comm_overlap ................................. False
15768
+ tp_comm_overlap_ag .............................. True
15769
+ tp_comm_overlap_cfg ............................. None
15770
+ tp_comm_overlap_rs .............................. True
15771
+ tp_comm_overlap_rs_dgrad ........................ False
15772
+ tp_comm_split_ag ................................ True
15773
+ tp_comm_split_rs ................................ True
15774
+ train_data_path ................................. None
15775
+ train_iters ..................................... 10
15776
+ train_samples ................................... None
15777
+ train_sync_interval ............................. None
15778
+ transformer_impl ................................ transformer_engine
15779
+ transformer_pipeline_model_parallel_size ........ 1
15780
+ untie_embeddings_and_output_weights ............. False
15781
+ use_checkpoint_args ............................. False
15782
+ use_checkpoint_opt_param_scheduler .............. False
15783
+ use_cpu_initialization .......................... None
15784
+ use_custom_fsdp ................................. False
15785
+ use_dist_ckpt ................................... True
15786
+ use_dist_ckpt_deprecated ........................ False
15787
+ use_distributed_optimizer ....................... False
15788
+ use_flash_attn .................................. False
15789
+ use_legacy_models ............................... False
15790
+ use_mp_args_from_checkpoint_args ................ False
15791
+ use_one_sent_docs ............................... False
15792
+ use_persistent_ckpt_worker ...................... False
15793
+ use_precision_aware_optimizer ................... False
15794
+ use_pytorch_profiler ............................ False
15795
+ use_ring_exchange_p2p ........................... False
15796
+ use_rope_scaling ................................ False
15797
+ use_rotary_position_embeddings .................. False
15798
+ use_sharp ....................................... False
15799
+ use_tokenizer_model_from_checkpoint_args ........ True
15800
+ use_torch_fsdp2 ................................. False
15801
+ use_torch_optimizer_for_cpu_offload ............. False
15802
+ use_tp_pp_dp_mapping ............................ False
15803
+ v_head_dim ...................................... 128
15804
+ valid_data_path ................................. None
15805
+ variable_seq_lengths ............................ False
15806
+ virtual_pipeline_model_parallel_size ............ None
15807
+ vision_backbone_type ............................ vit
15808
+ vision_pretraining .............................. False
15809
+ vision_pretraining_type ......................... classify
15810
+ vocab_extra_ids ................................. 0
15811
+ vocab_file ...................................... vocab.json
15812
+ vocab_size ...................................... None
15813
+ wandb_exp_name ..................................
15814
+ wandb_project ...................................
15815
+ wandb_save_dir ..................................
15816
+ weight_decay .................................... 0.1
15817
+ weight_decay_incr_style ......................... constant
15818
+ wgrad_deferral_limit ............................ 0
15819
+ world_size ...................................... 64
15820
+ yaml_cfg ........................................ None
15821
+ -------------------- end of arguments ---------------------
15822
+ INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
15823
+ > building GPT2BPETokenizer tokenizer ...
15824
+ INFO:megatron.training.initialize:Setting logging level to 0
15825
+ INFO:megatron.training.initialize:Setting logging level to 0
15826
+ INFO:megatron.training.initialize:Setting logging level to 0
15827
+ INFO:megatron.training.initialize:Setting logging level to 0
15828
+ INFO:megatron.training.initialize:Setting logging level to 0
15829
+ > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)
15830
+ INFO:megatron.training.initialize:Setting logging level to 0
15831
+ WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
15832
+ > initializing torch distributed ...
15833
+ INFO:megatron.training.initialize:Setting logging level to 0
15834
+ INFO:megatron.training.initialize:Setting logging level to 0
15835
+ INFO:megatron.training.initialize:Setting logging level to 0
15836
+ INFO:megatron.training.initialize:Setting logging level to 0
15837
+ WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
15838
+ WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
15839
+ INFO:megatron.training.initialize:Setting logging level to 0
15840
+ INFO:megatron.training.initialize:Setting logging level to 0
15841
+ INFO:megatron.training.initialize:Setting logging level to 0
15842
+ INFO:megatron.training.initialize:Setting logging level to 0
15843
+ INFO:megatron.training.initialize:Setting logging level to 0
15844
+ INFO:megatron.training.initialize:Setting logging level to 0
15845
+ INFO:megatron.training.initialize:Setting logging level to 0
15846
+ INFO:megatron.training.initialize:Setting logging level to 0
15847
+ INFO:megatron.training.initialize:Setting logging level to 0
15848
+ INFO:megatron.training.initialize:Setting logging level to 0
15849
+ INFO:megatron.training.initialize:Setting logging level to 0
15850
+ INFO:megatron.training.initialize:Setting logging level to 0
15851
+ INFO:megatron.training.initialize:Setting logging level to 0
15852
+ INFO:megatron.training.initialize:Setting logging level to 0
15853
+ INFO:megatron.training.initialize:Setting logging level to 0
15854
+ INFO:megatron.training.initialize:Setting logging level to 0
15855
+ INFO:megatron.training.initialize:Setting logging level to 0
15856
+ INFO:megatron.training.initialize:Setting logging level to 0
15857
+ INFO:megatron.training.initialize:Setting logging level to 0
15858
+ INFO:megatron.training.initialize:Setting logging level to 0
15859
+ INFO:megatron.training.initialize:Setting logging level to 0
15860
+ INFO:megatron.training.initialize:Setting logging level to 0
15861
+ INFO:megatron.training.initialize:Setting logging level to 0
15862
+ INFO:megatron.training.initialize:Setting logging level to 0
15863
+ INFO:megatron.training.initialize:Setting logging level to 0
15864
+ INFO:megatron.training.initialize:Setting logging level to 0
15865
+ INFO:megatron.training.initialize:Setting logging level to 0
15866
+ INFO:megatron.training.initialize:Setting logging level to 0
15867
+ INFO:megatron.training.initialize:Setting logging level to 0
15868
+ INFO:megatron.training.initialize:Setting logging level to 0
15869
+ INFO:megatron.training.initialize:Setting logging level to 0
15870
+ INFO:megatron.training.initialize:Setting logging level to 0
15871
+ INFO:megatron.training.initialize:Setting logging level to 0
15872
+ INFO:megatron.training.initialize:Setting logging level to 0
15873
+ INFO:megatron.training.initialize:Setting logging level to 0
15874
+ INFO:megatron.training.initialize:Setting logging level to 0
15875
+ INFO:megatron.training.initialize:Setting logging level to 0
15876
+ INFO:megatron.training.initialize:Setting logging level to 0
15877
+ INFO:megatron.training.initialize:Setting logging level to 0
15878
+ INFO:megatron.training.initialize:Setting logging level to 0
15879
+ INFO:megatron.training.initialize:Setting logging level to 0
15880
+ INFO:megatron.training.initialize:Setting logging level to 0
15881
+ INFO:megatron.training.initialize:Setting logging level to 0
15882
+ INFO:megatron.training.initialize:Setting logging level to 0
15883
+ INFO:megatron.training.initialize:Setting logging level to 0
15884
+ > initialized tensor model parallel with size 8
15885
+ > initialized pipeline model parallel with size 1
15886
+ > setting random seeds to 1234 ...
15887
+ > compiling dataset index builder ...
15888
+ make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
15889
+ make: Nothing to be done for 'default'.
15890
+ make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
15891
+ >>> done with dataset index builder. Compilation time: 0.041 seconds
15892
+ > compiling and loading fused kernels ...
15893
+ >>> done with compiling and loading fused kernels. Compilation time: 2.848 seconds
15894
+ time to initialize megatron (seconds): 9.089
15895
+ [after megatron is initialized] datetime: 2025-06-21 20:56:05
15896
+ building GPT model ...
15897
+ >>> embedding
15898
+ >>> decoder
15899
+ >>> output_layer
15900
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
15901
+ >>> embedding
15902
+ >>> decoder
15903
+ >>> output_layer
15904
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
15905
+ >>> embedding
15906
+ >>> decoder
15907
+ >>> output_layer
15908
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
15909
+ >>> embedding
15910
+ >>> decoder
15911
+ >>> output_layer
15912
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
15913
+ >>> embedding
15914
+ >>> decoder
15915
+ >>> output_layer
15916
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
15917
+ >>> embedding
15918
+ >>> decoder
15919
+ >>> output_layer
15920
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
15921
+ >>> embedding
15922
+ >>> decoder
15923
+ >>> output_layer
15924
+ >>> embedding
15925
+ >>> decoder
15926
+ >>> output_layer
15927
+ >>> embedding
15928
+ >>> decoder
15929
+ >>> output_layer
15930
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
15931
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
15932
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
15933
+ >>> embedding
15934
+ >>> decoder
15935
+ >>> output_layer
15936
+ >>> embedding
15937
+ >>> decoder
15938
+ >>> output_layer
15939
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
15940
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
15941
+ >>> embedding
15942
+ >>> decoder
15943
+ >>> output_layer
15944
+ >>> embedding
15945
+ >>> decoder
15946
+ >>> output_layer
15947
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
15948
+ >>> embedding
15949
+ >>> embedding
15950
+ >>> decoder
15951
+ >>> output_layer
15952
+ >>> decoder
15953
+ >>> embedding
15954
+ >>> decoder
15955
+ >>> output_layer
15956
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
15957
+ >>> output_layer
15958
+ >>> embedding
15959
+ >>> decoder
15960
+ >>> output_layer
15961
+ >>> embedding
15962
+ >>> decoder
15963
+ >>> output_layer
15964
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
15965
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
15966
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
15967
+ >>> embedding
15968
+ >>> decoder
15969
+ >>> output_layer
15970
+ >>> embedding
15971
+ >>> decoder
15972
+ >>> output_layer
15973
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
15974
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
15975
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
15976
+ >>> embedding
15977
+ >>> decoder
15978
+ >>> output_layer
15979
+ >>> embedding
15980
+ >>> decoder
15981
+ >>> output_layer
15982
+ >>> embedding
15983
+ >>> decoder
15984
+ >>> output_layer
15985
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
15986
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
15987
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
15988
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
15989
+ >>> embedding
15990
+ >>> decoder
15991
+ >>> output_layer
15992
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
15993
+ >>> embedding
15994
+ >>> decoder
15995
+ >>> output_layer
15996
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
15997
+ >>> embedding
15998
+ >>> decoder
15999
+ >>> output_layer
16000
+ >>> embedding
16001
+ >>> decoder
16002
+ >>> output_layer
16003
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16004
+ >>> embedding
16005
+ >>> decoder
16006
+ >>> output_layer
16007
+ >>> embedding
16008
+ >>> decoder
16009
+ >>> output_layer
16010
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
16011
+ >>> embedding
16012
+ >>> decoder
16013
+ >>> output_layer
16014
+ >>> embedding
16015
+ >>> decoder
16016
+ >>> output_layer
16017
+ >>> embedding
16018
+ >>> decoder
16019
+ >>> output_layer
16020
+ >>> embedding
16021
+ >>> decoder
16022
+ >>> output_layer
16023
+ >>> embedding
16024
+ >>> decoder
16025
+ >>> output_layer
16026
+ >>> embedding
16027
+ >>> decoder
16028
+ >>> output_layer
16029
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
16030
+ >>> embedding
16031
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
16032
+ >>> embedding
16033
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16034
+ >>> decoder
16035
+ >>> output_layer
16036
+ >>> decoder
16037
+ >>> output_layer
16038
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
16039
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
16040
+ >>> embedding
16041
+ >>> decoder
16042
+ >>> output_layer
16043
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
16044
+ >>> embedding
16045
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16046
+ >>> decoder
16047
+ >>> output_layer
16048
+ >>> embedding
16049
+ >>> decoder
16050
+ >>> output_layer
16051
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
16052
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16053
+ >>> embedding
16054
+ >>> decoder
16055
+ >>> output_layer
16056
+ >>> embedding
16057
+ >>> decoder
16058
+ >>> output_layer
16059
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
16060
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
16061
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
16062
+ >>> embedding
16063
+ >>> decoder
16064
+ >>> output_layer
16065
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
16066
+ >>> embedding
16067
+ >>> decoder
16068
+ >>> output_layer
16069
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16070
+ > number of parameters on (tensor, pipeline) model parallel rank (2, 0): 137426432
16071
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
16072
+ >>> embedding
16073
+ >>> decoder
16074
+ >>> embedding
16075
+ >>> decoder
16076
+ >>> output_layer
16077
+ >>> output_layer
16078
+ >>> embedding
16079
+ >>> decoder
16080
+ >>> output_layer
16081
+ >>> embedding
16082
+ >>> decoder
16083
+ >>> output_layer
16084
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
16085
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16086
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16087
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16088
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
16089
+ >>> embedding
16090
+ >>> decoder
16091
+ >>> output_layer
16092
+ >>> embedding
16093
+ >>> decoder
16094
+ >>> output_layer
16095
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16096
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
16097
+ >>> embedding
16098
+ >>> decoder
16099
+ >>> output_layer
16100
+ >>> embedding
16101
+ >>> decoder
16102
+ >>> output_layer
16103
+ >>> embedding
16104
+ >>> decoder
16105
+ >>> output_layer
16106
+ >>> embedding
16107
+ >>> decoder
16108
+ >>> output_layer
16109
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
16110
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
16111
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
16112
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16113
+ >>> embedding
16114
+ >>> decoder
16115
+ >>> output_layer
16116
+ > number of parameters on (tensor, pipeline) model parallel rank (7, 0): 137426432
16117
+ >>> embedding
16118
+ >>> decoder
16119
+ >>> output_layer
16120
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
16121
+ >>> embedding
16122
+ >>> decoder
16123
+ >>> output_layer
16124
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16125
+ >>> embedding
16126
+ >>> decoder
16127
+ >>> output_layer
16128
+ >>> embedding
16129
+ >>> embedding
16130
+ >>> decoder
16131
+ >>> output_layer
16132
+ >>> decoder
16133
+ >>> output_layer
16134
+ >>> embedding
16135
+ >>> decoder
16136
+ >>> output_layer
16137
+ > number of parameters on (tensor, pipeline) model parallel rank (5, 0): 137426432
16138
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 137426432
16139
+ > number of parameters on (tensor, pipeline) model parallel rank (3, 0): 137426432
16140
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
16141
+ INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
16142
+ >>> embedding
16143
+ >>> decoder
16144
+ >>> output_layer
16145
+ >>> embedding
16146
+ >>> decoder
16147
+ >>> output_layer
16148
+ > number of parameters on (tensor, pipeline) model parallel rank (4, 0): 137426432
16149
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 137426432
16150
+ INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
16151
+ Params for bucket 1 (137426432 elements, 137426432 padded size):
16152
+ module.decoder.layers.1.mlp.linear_fc2.weight
16153
+ module.decoder.layers.1.self_attention.linear_proj.bias
16154
+ module.decoder.layers.0.self_attention.linear_proj.bias
16155
+ module.embedding.word_embeddings.weight
16156
+ module.decoder.final_layernorm.bias
16157
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
16158
+ module.decoder.layers.0.mlp.linear_fc2.weight
16159
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
16160
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
16161
+ module.decoder.layers.1.self_attention.linear_qkv.bias
16162
+ module.decoder.layers.0.mlp.linear_fc2.bias
16163
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
16164
+ module.decoder.layers.0.self_attention.linear_qkv.bias
16165
+ module.decoder.final_layernorm.weight
16166
+ module.decoder.layers.1.mlp.linear_fc1.weight
16167
+ module.decoder.layers.0.mlp.linear_fc1.weight
16168
+ module.embedding.position_embeddings.weight
16169
+ module.decoder.layers.1.mlp.linear_fc2.bias
16170
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
16171
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
16172
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
16173
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
16174
+ module.decoder.layers.1.mlp.linear_fc1.bias
16175
+ module.decoder.layers.0.mlp.linear_fc1.bias
16176
+ module.decoder.layers.1.self_attention.linear_qkv.weight
16177
+ module.decoder.layers.1.self_attention.linear_proj.weight
16178
+ module.decoder.layers.0.self_attention.linear_qkv.weight
16179
+ module.decoder.layers.0.self_attention.linear_proj.weight
16180
+ INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=<megatron.core.timers.Timers object at 0x154d8318a120>, config_logger_dir='')
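The optimizer config above enables dynamic fp16 loss scaling (initial_loss_scale=4294967296, loss_scale_window=1000, hysteresis=2, min_loss_scale=1.0). A minimal sketch of how a scaler with these hyperparameters typically behaves; this illustrates the general technique only and is not Megatron's implementation:

    class DynamicLossScaler:
        # Halve the scale after `hysteresis` overflowing steps; double it after
        # `window` consecutive clean steps; never drop below `min_scale`.
        def __init__(self, scale=4294967296.0, min_scale=1.0, window=1000, hysteresis=2):
            self.scale, self.min_scale = scale, min_scale
            self.window, self.hysteresis = window, hysteresis
            self._good_steps, self._overflows_left = 0, hysteresis

        def update(self, found_overflow: bool) -> None:
            if found_overflow:
                self._good_steps = 0
                self._overflows_left -= 1
                if self._overflows_left <= 0:
                    self.scale = max(self.scale / 2.0, self.min_scale)
                    self._overflows_left = self.hysteresis
            else:
                self._good_steps += 1
                if self._good_steps >= self.window:
                    self.scale *= 2.0
                    self._good_steps = 0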
16181
+ INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
16182
+ >>> embedding
16183
+ >>> decoder
16184
+ >>> output_layer
16185
+ > number of parameters on (tensor, pipeline) model parallel rank (6, 0): 137426432
16186
+ loading distributed checkpoint from gpt-checkpoint at iteration 10
16187
+ Running ctx_length=24576, TP_SIZE=8, CP_SIZE=8, BATCH_SIZE=32
16188
+ Cleaning up checkpoint directory: gpt-checkpoint
16189
+ Cleaning up checkpoint directory: gpt-checkpoint
16190
+ Cleaning up checkpoint directory: gpt-checkpoint
16191
+ Cleaning up checkpoint directory: gpt-checkpoint
16192
+ Cleaning up checkpoint directory: gpt-checkpoint
16193
+ Cleaning up checkpoint directory: gpt-checkpoint
16194
+ Cleaning up checkpoint directory: gpt-checkpoint
16195
+ --------------------------------
16196
+ CTX_LENGTH: 24576
16197
+ TP_SIZE: 8
16198
+ CP_SIZE: 8
16199
+ --------------------------------
16200
+ CTX_LENGTH: 24576
16201
+ TP_SIZE: 8
16202
+ --------------------------------
16203
+ CTX_LENGTH: 24576
16204
+ TP_SIZE: 8
16205
+ CP_SIZE: 8
16206
+ --------------------------------
16207
+ CTX_LENGTH: 24576
16208
+ --------------------------------
16209
+ CTX_LENGTH: 24576
16210
+ TP_SIZE: 8
16211
+ CP_SIZE: 8
16212
+ CHECKPOINT_PATH: gpt-checkpoint
16213
+ CHECKPOINT_PATH: gpt-checkpoint
16214
+ --------------------------------
16215
+ CTX_LENGTH: 24576
16216
+ TP_SIZE: 8
16217
+ CP_SIZE: 8
16218
+ CHECKPOINT_PATH: gpt-checkpoint
16219
+ --------------------------------
16220
+ CTX_LENGTH: 24576
16221
+ TP_SIZE: 8
16222
+ CP_SIZE: 8
16223
+ CP_SIZE: 8
16224
+ CHECKPOINT_PATH: gpt-checkpoint
16225
+ CHECKPOINT_PATH: gpt-checkpoint
16226
+ TP_SIZE: 8
16227
+ CP_SIZE: 8
16228
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16229
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16230
+ --------------------------------
16231
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16232
+ --------------------------------
16233
+ CHECKPOINT_PATH: gpt-checkpoint
16234
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16235
+ --------------------------------
16236
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16237
+ CHECKPOINT_PATH: gpt-checkpoint
16238
+ --------------------------------
16239
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16240
+ --------------------------------
16241
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16242
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16243
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16244
+ --------------------------------
16245
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16246
+ --------------------------------
16247
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16248
+ Cleaning up checkpoint directory: gpt-checkpoint
16249
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16250
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16251
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16252
+ --------------------------------
16253
+ CTX_LENGTH: 24576
16254
+ TP_SIZE: 8
16255
+ CP_SIZE: 8
16256
+ CHECKPOINT_PATH: gpt-checkpoint
16257
+ PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
16258
+ --------------------------------
16259
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
16260
+ INFO:megatron.training.initialize:Setting logging level to 0
16261
+ INFO:megatron.training.initialize:Setting logging level to 0
16262
+ INFO:megatron.training.initialize:Setting logging level to 0
16263
+ INFO:megatron.training.initialize:Setting logging level to 0
16264
+ INFO:megatron.training.initialize:Setting logging level to 0
16265
+ INFO:megatron.training.initialize:Setting logging level to 0
16266
+ INFO:megatron.training.initialize:Setting logging level to 0
16267
+ WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
16268
+ WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
16269
+ INFO:megatron.training.initialize:Setting logging level to 0
16270
+ INFO:megatron.training.initialize:Setting logging level to 0
16271
+ INFO:megatron.training.initialize:Setting logging level to 0
16272
+ INFO:megatron.training.initialize:Setting logging level to 0
16273
+ INFO:megatron.training.initialize:Setting logging level to 0
16274
+ INFO:megatron.training.initialize:Setting logging level to 0
16275
+ INFO:megatron.training.initialize:Setting logging level to 0
16276
+ INFO:megatron.training.initialize:Setting logging level to 0
16277
+ INFO:megatron.training.initialize:Setting logging level to 0
16278
+ INFO:megatron.training.initialize:Setting logging level to 0
16279
+ INFO:megatron.training.initialize:Setting logging level to 0
16280
+ INFO:megatron.training.initialize:Setting logging level to 0
16281
+ INFO:megatron.training.initialize:Setting logging level to 0
16282
+ INFO:megatron.training.initialize:Setting logging level to 0
16283
+ INFO:megatron.training.initialize:Setting logging level to 0
16284
+ INFO:megatron.training.initialize:Setting logging level to 0
16285
+ INFO:megatron.training.initialize:Setting logging level to 0
16286
+ INFO:megatron.training.initialize:Setting logging level to 0
16287
+ INFO:megatron.training.initialize:Setting logging level to 0
16288
+ INFO:megatron.training.initialize:Setting logging level to 0
16289
+ INFO:megatron.training.initialize:Setting logging level to 0
16290
+ INFO:megatron.training.initialize:Setting logging level to 0
16291
+ INFO:megatron.training.initialize:Setting logging level to 0
16292
+ INFO:megatron.training.initialize:Setting logging level to 0
16293
+ INFO:megatron.training.initialize:Setting logging level to 0
16294
+ using world size: 64, data-parallel size: 1, context-parallel size: 8, hierarchical context-parallel sizes: None, tensor-model-parallel size: 8, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
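The data-parallel size of 1 reported above is just the remaining factor of the world size after the other parallel dimensions are accounted for; a quick illustrative check:

    world_size = 64
    tp, pp, cp = 8, 1, 8                      # tensor-, pipeline-, context-parallel sizes from this run
    dp = world_size // (tp * pp * cp)         # -> 1, matching "data-parallel size: 1"
    assert dp * tp * pp * cp == world_size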
16295
+ Number of virtual stages per pipeline stage: None
16296
+ WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
16297
+ using torch.float16 for parameters ...
16298
+ ------------------------ arguments ------------------------
16299
+ account_for_embedding_in_pipeline_split ......... False
16300
+ account_for_loss_in_pipeline_split .............. False
16301
+ accumulate_allreduce_grads_in_fp32 .............. False
16302
+ adam_beta1 ...................................... 0.9
16303
+ adam_beta2 ...................................... 0.999
16304
+ adam_eps ........................................ 1e-08
16305
+ add_bias_linear ................................. True
16306
+ add_position_embedding .......................... True
16307
+ add_qkv_bias .................................... True
16308
+ adlr_autoresume ................................. False
16309
+ adlr_autoresume_interval ........................ 1000
16310
+ align_grad_reduce ............................... True
16311
+ align_param_gather .............................. False
16312
+ app_tag_run_name ................................ None
16313
+ app_tag_run_version ............................. 0.0.0
16314
+ apply_layernorm_1p .............................. False
16315
+ apply_query_key_layer_scaling ................... False
16316
+ apply_residual_connection_post_layernorm ........ False
16317
+ apply_rope_fusion ............................... False
16318
+ async_save ...................................... None
16319
+ async_tensor_model_parallel_allreduce ........... True
16320
+ attention_backend ............................... AttnBackend.auto
16321
+ attention_dropout ............................... 0.1
16322
+ attention_softmax_in_fp32 ....................... False
16323
+ auto_detect_ckpt_format ......................... False
16324
+ barrier_with_L1_time ............................ True
16325
+ bert_binary_head ................................ True
16326
+ bert_embedder_type .............................. megatron
16327
+ bert_load ....................................... None
16328
+ bf16 ............................................ False
16329
+ bias_dropout_fusion ............................. True
16330
+ bias_gelu_fusion ................................ True
16331
+ bias_swiglu_fusion .............................. True
16332
+ biencoder_projection_dim ........................ 0
16333
+ biencoder_shared_query_context_model ............ False
16334
+ block_data_path ................................. None
16335
+ calc_ft_timeouts ................................ False
16336
+ calculate_per_token_loss ........................ False
16337
+ check_for_large_grads ........................... False
16338
+ check_for_nan_in_loss_and_grad .................. False
16339
+ check_for_spiky_loss ............................ False
16340
+ check_weight_hash_across_dp_replicas_interval ... None
16341
+ ckpt_assume_constant_structure .................. False
16342
+ ckpt_convert_format ............................. None
16343
+ ckpt_convert_save ............................... None
16344
+ ckpt_convert_update_legacy_dist_opt_format ...... False
16345
+ ckpt_format ..................................... torch_dist
16346
+ ckpt_fully_parallel_load ........................ False
16347
+ ckpt_fully_parallel_save ........................ True
16348
+ ckpt_fully_parallel_save_deprecated ............. False
16349
+ ckpt_step ....................................... None
16350
+ classes_fraction ................................ 1.0
16351
+ clip_grad ....................................... 1.0
16352
+ clone_scatter_output_in_embedding ............... True
16353
+ config_logger_dir ...............................
16354
+ consumed_train_samples .......................... 0
16355
+ consumed_valid_samples .......................... 0
16356
+ context_parallel_size ........................... 8
16357
+ cp_comm_type .................................... ['p2p']
16358
+ create_attention_mask_in_dataloader ............. True
16359
+ cross_entropy_fusion_impl ....................... native
16360
+ cross_entropy_loss_fusion ....................... False
16361
+ cuda_graph_scope ................................ full
16362
+ cuda_graph_warmup_steps ......................... 3
16363
+ data_args_path .................................. None
16364
+ data_cache_path ................................. None
16365
+ data_parallel_random_init ....................... False
16366
+ data_parallel_sharding_strategy ................. no_shard
16367
+ data_parallel_size .............................. 1
16368
+ data_path ....................................... None
16369
+ data_per_class_fraction ......................... 1.0
16370
+ data_sharding ................................... True
16371
+ dataloader_type ................................. single
16372
+ ddp_average_in_collective ....................... False
16373
+ ddp_bucket_size ................................. None
16374
+ ddp_num_buckets ................................. None
16375
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
16376
+ decoder_first_pipeline_num_layers ............... None
16377
+ decoder_last_pipeline_num_layers ................ None
16378
+ decoder_num_layers .............................. None
16379
+ decoder_seq_length .............................. None
16380
+ decoupled_lr .................................... None
16381
+ decoupled_min_lr ................................ None
16382
+ decrease_batch_size_if_needed ................... False
16383
+ defer_embedding_wgrad_compute ................... False
16384
+ deprecated_use_mcore_models ..................... False
16385
+ deterministic_mode .............................. False
16386
+ dino_bottleneck_size ............................ 256
16387
+ dino_freeze_last_layer .......................... 1
16388
+ dino_head_hidden_size ........................... 2048
16389
+ dino_local_crops_number ......................... 10
16390
+ dino_local_img_size ............................. 96
16391
+ dino_norm_last_layer ............................ False
16392
+ dino_teacher_temp ............................... 0.07
16393
+ dino_warmup_teacher_temp ........................ 0.04
16394
+ dino_warmup_teacher_temp_epochs ................. 30
16395
+ disable_bf16_reduced_precision_matmul ........... False
16396
+ disable_mamba_mem_eff_path ...................... False
16397
+ disable_straggler_on_startup .................... False
16398
+ dist_ckpt_format_deprecated ..................... None
16399
+ dist_ckpt_strictness ............................ assume_ok_unexpected
16400
+ distribute_saved_activations .................... False
16401
+ distributed_backend ............................. nccl
16402
+ distributed_timeout_minutes ..................... 10
16403
+ embedding_path .................................. None
16404
+ empty_unused_memory_level ....................... 0
16405
+ enable_cuda_graph ............................... False
16406
+ enable_ft_package ............................... False
16407
+ enable_gloo_process_groups ...................... True
16408
+ enable_msc ...................................... True
16409
+ enable_one_logger ............................... True
16410
+ encoder_num_layers .............................. 2
16411
+ encoder_pipeline_model_parallel_size ............ 0
16412
+ encoder_seq_length .............................. 24576
16413
+ encoder_tensor_model_parallel_size .............. 0
16414
+ end_weight_decay ................................ 0.1
16415
+ eod_mask_loss ................................... False
16416
+ error_injection_rate ............................ 0
16417
+ error_injection_type ............................ transient_error
16418
+ eval_interval ................................... 16
16419
+ eval_iters ...................................... 1
16420
+ evidence_data_path .............................. None
16421
+ exit_duration_in_mins ........................... None
16422
+ exit_interval ................................... None
16423
+ exit_on_missing_checkpoint ...................... False
16424
+ exit_signal_handler ............................. False
16425
+ exp_avg_dtype ................................... torch.float32
16426
+ exp_avg_sq_dtype ................................ torch.float32
16427
+ expert_model_parallel_size ...................... 1
16428
+ expert_tensor_parallel_size ..................... 8
16429
+ external_cuda_graph ............................. False
16430
+ ffn_hidden_size ................................. 16384
16431
+ finetune ........................................ False
16432
+ first_last_layers_bf16 .......................... False
16433
+ flash_decode .................................... False
16434
+ fp16 ............................................ True
16435
+ fp16_lm_cross_entropy ........................... False
16436
+ fp32_residual_connection ........................ False
16437
+ fp8 ............................................. None
16438
+ fp8_amax_compute_algo ........................... most_recent
16439
+ fp8_amax_history_len ............................ 1
16440
+ fp8_interval .................................... 1
16441
+ fp8_margin ...................................... 0
16442
+ fp8_param_gather ................................ False
16443
+ fp8_recipe ...................................... delayed
16444
+ fp8_wgrad ....................................... True
16445
+ fsdp_double_buffer .............................. False
16446
+ global_batch_size ............................... 1
16447
+ grad_reduce_in_bf16 ............................. False
16448
+ gradient_accumulation_fusion .................... True
16449
+ gradient_reduce_div_fusion ...................... True
16450
+ group_query_attention ........................... True
16451
+ head_lr_mult .................................... 1.0
16452
+ heterogeneous_layers_config_encoded_json ........ None
16453
+ heterogeneous_layers_config_path ................ None
16454
+ hidden_dropout .................................. 0.1
16455
+ hidden_size ..................................... 4096
16456
+ hierarchical_context_parallel_sizes ............. None
16457
+ high_priority_stream_groups ..................... []
16458
+ hybrid_attention_ratio .......................... 0.0
16459
+ hybrid_mlp_ratio ................................ 0.0
16460
+ hybrid_override_pattern ......................... None
16461
+ hysteresis ...................................... 2
16462
+ ict_head_size ................................... None
16463
+ ict_load ........................................ None
16464
+ img_h ........................................... 224
16465
+ img_w ........................................... 224
16466
+ indexer_batch_size .............................. 128
16467
+ indexer_log_interval ............................ 1000
16468
+ inference_batch_times_seqlen_threshold .......... -1
16469
+ inference_dynamic_batching ...................... False
16470
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
16471
+ inference_dynamic_batching_buffer_overflow_factor None
16472
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
16473
+ inference_dynamic_batching_chunk_size ........... 256
16474
+ inference_dynamic_batching_max_requests_override None
16475
+ inference_dynamic_batching_max_tokens_override .. None
16476
+ inference_max_batch_size ........................ 8
16477
+ inference_max_seq_length ........................ 2560
16478
+ inference_rng_tracker ........................... False
16479
+ init_method_std ................................. 0.02
16480
+ init_method_xavier_uniform ...................... False
16481
+ init_model_with_meta_device ..................... False
16482
+ initial_loss_scale .............................. 4294967296
16483
+ inprocess_active_world_size ..................... 64
16484
+ inprocess_barrier_timeout ....................... 120
16485
+ inprocess_completion_timeout .................... 120
16486
+ inprocess_empty_cuda_cache ...................... False
16487
+ inprocess_granularity ........................... node
16488
+ inprocess_hard_timeout .......................... 90
16489
+ inprocess_heartbeat_interval .................... 30
16490
+ inprocess_heartbeat_timeout ..................... 60
16491
+ inprocess_last_call_wait ........................ 1
16492
+ inprocess_max_iterations ........................ None
16493
+ inprocess_monitor_process_interval .............. 1.0
16494
+ inprocess_monitor_thread_interval ............... 1.0
16495
+ inprocess_progress_watchdog_interval ............ 1.0
16496
+ inprocess_restart ............................... False
16497
+ inprocess_soft_timeout .......................... 60
16498
+ inprocess_termination_grace_time ................ 1
16499
+ is_hybrid_model ................................. False
16500
+ iter_per_epoch .................................. 1250
16501
+ iterations_to_skip .............................. []
16502
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
16503
+ kv_channels ..................................... 64
16504
+ kv_lora_rank .................................... 32
16505
+ lazy_mpu_init ................................... None
16506
+ load ............................................ gpt-checkpoint
16507
+ load_model_opt_format ........................... False
16508
+ local_rank ...................................... 0
16509
+ log_interval .................................... 1
16510
+ log_loss_scale_to_tensorboard ................... True
16511
+ log_memory_to_tensorboard ....................... False
16512
+ log_num_zeros_in_grad ........................... False
16513
+ log_params_norm ................................. False
16514
+ log_progress .................................... False
16515
+ log_straggler ................................... False
16516
+ log_throughput .................................. False
16517
+ log_timers_to_tensorboard ....................... False
16518
+ log_validation_ppl_to_tensorboard ............... False
16519
+ log_world_size_to_tensorboard ................... False
16520
+ logging_level ................................... 0
16521
+ loss_scale ...................................... None
16522
+ loss_scale_window ............................... 1000
16523
+ lr .............................................. 0.0005
16524
+ lr_decay_iters .................................. 150000
16525
+ lr_decay_samples ................................ None
16526
+ lr_decay_style .................................. cosine
16527
+ lr_warmup_fraction .............................. None
16528
+ lr_warmup_init .................................. 0.0
16529
+ lr_warmup_iters ................................. 2
16530
+ lr_warmup_samples ............................... 0
16531
+ lr_wsd_decay_iters .............................. None
16532
+ lr_wsd_decay_samples ............................ None
16533
+ lr_wsd_decay_style .............................. exponential
16534
+ main_grads_dtype ................................ torch.float32
16535
+ main_params_dtype ............................... torch.float32
16536
+ make_vocab_size_divisible_by .................... 128
16537
+ mamba_head_dim .................................. 64
16538
+ mamba_num_groups ................................ 8
16539
+ mamba_num_heads ................................. None
16540
+ mamba_state_dim ................................. 128
16541
+ manual_gc ....................................... False
16542
+ manual_gc_eval .................................. True
16543
+ manual_gc_interval .............................. 0
16544
+ mask_factor ..................................... 1.0
16545
+ mask_prob ....................................... 0.15
16546
+ mask_type ....................................... random
16547
+ masked_softmax_fusion ........................... True
16548
+ max_position_embeddings ......................... 24576
16549
+ max_tokens_to_oom ............................... 12000
16550
+ memory_snapshot_path ............................ snapshot.pickle
16551
+ merge_file ...................................... merges.txt
16552
+ micro_batch_size ................................ 1
16553
+ microbatch_group_size_per_vp_stage .............. None
16554
+ mid_level_dataset_surplus ....................... 0.005
16555
+ min_loss_scale .................................. 1.0
16556
+ min_lr .......................................... 0.0
16557
+ mlp_chunks_for_prefill .......................... 1
16558
+ mmap_bin_files .................................. True
16559
+ mock_data ....................................... True
16560
+ moe_apply_probs_on_input ........................ False
16561
+ moe_aux_loss_coeff .............................. 0.0
16562
+ moe_enable_deepep ............................... False
16563
+ moe_expert_capacity_factor ...................... None
16564
+ moe_extended_tp ................................. False
16565
+ moe_ffn_hidden_size ............................. None
16566
+ moe_grouped_gemm ................................ False
16567
+ moe_input_jitter_eps ............................ None
16568
+ moe_layer_freq .................................. 1
16569
+ moe_layer_recompute ............................. False
16570
+ moe_pad_expert_input_to_capacity ................ False
16571
+ moe_per_layer_logging ........................... False
16572
+ moe_permute_fusion .............................. False
16573
+ moe_router_bias_update_rate ..................... 0.001
16574
+ moe_router_dtype ................................ None
16575
+ moe_router_enable_expert_bias ................... False
16576
+ moe_router_force_load_balancing ................. False
16577
+ moe_router_group_topk ........................... None
16578
+ moe_router_load_balancing_type .................. aux_loss
16579
+ moe_router_num_groups ........................... None
16580
+ moe_router_padding_for_fp8 ...................... False
16581
+ moe_router_pre_softmax .......................... False
16582
+ moe_router_score_function ....................... softmax
16583
+ moe_router_topk ................................. 2
16584
+ moe_router_topk_scaling_factor .................. None
16585
+ moe_shared_expert_intermediate_size ............. None
16586
+ moe_shared_expert_overlap ....................... False
16587
+ moe_token_dispatcher_type ....................... allgather
16588
+ moe_token_drop_policy ........................... probs
16589
+ moe_use_legacy_grouped_gemm ..................... False
16590
+ moe_use_upcycling ............................... False
16591
+ moe_z_loss_coeff ................................ None
16592
+ mrope_section ................................... None
16593
+ mscale .......................................... 1.0
16594
+ mscale_all_dim .................................. 1.0
16595
+ mtp_loss_scaling_factor ......................... 0.1
16596
+ mtp_num_layers .................................. None
16597
+ multi_latent_attention .......................... False
16598
+ nccl_all_reduce_for_prefill ..................... False
16599
+ nccl_communicator_config_path ................... None
16600
+ nccl_ub ......................................... False
16601
+ no_load_optim ................................... None
16602
+ no_load_rng ..................................... None
16603
+ no_persist_layer_norm ........................... False
16604
+ no_rope_freq .................................... None
16605
+ no_save_optim ................................... None
16606
+ no_save_rng ..................................... None
16607
+ non_persistent_ckpt_type ........................ None
16608
+ non_persistent_global_ckpt_dir .................. None
16609
+ non_persistent_local_ckpt_algo .................. fully_parallel
16610
+ non_persistent_local_ckpt_dir ................... None
16611
+ non_persistent_save_interval .................... None
16612
+ norm_epsilon .................................... 1e-05
16613
+ normalization ................................... LayerNorm
16614
+ num_attention_heads ............................. 64
16615
+ num_channels .................................... 3
16616
+ num_classes ..................................... 1000
16617
+ num_dataset_builder_threads ..................... 1
16618
+ num_distributed_optimizer_instances ............. 1
16619
+ num_experts ..................................... None
16620
+ num_layers ...................................... 2
16621
+ num_layers_at_end_in_bf16 ....................... 1
16622
+ num_layers_at_start_in_bf16 ..................... 1
16623
+ num_layers_per_virtual_pipeline_stage ........... None
16624
+ num_query_groups ................................ 16
16625
+ num_virtual_stages_per_pipeline_rank ............ None
16626
+ num_workers ..................................... 2
16627
+ object_storage_cache_path ....................... None
16628
+ one_logger_async ................................ False
16629
+ one_logger_project .............................. megatron-lm
16630
+ one_logger_run_name ............................. None
16631
+ onnx_safe ....................................... None
16632
+ openai_gelu ..................................... False
16633
+ optimizer ....................................... adam
16634
+ optimizer_cpu_offload ........................... False
16635
+ optimizer_offload_fraction ...................... 1.0
16636
+ output_bert_embeddings .......................... False
16637
+ overlap_cpu_optimizer_d2h_h2d ................... False
16638
+ overlap_grad_reduce ............................. False
16639
+ overlap_p2p_comm ................................ False
16640
+ overlap_p2p_comm_warmup_flush ................... False
16641
+ overlap_param_gather ............................ False
16642
+ overlap_param_gather_with_optimizer_step ........ False
16643
+ override_opt_param_scheduler .................... False
16644
+ params_dtype .................................... torch.float16
16645
+ patch_dim ....................................... 16
16646
+ per_split_data_args_path ........................ None
16647
+ perform_initialization .......................... True
16648
+ pin_cpu_grads ................................... True
16649
+ pin_cpu_params .................................. True
16650
+ pipeline_model_parallel_comm_backend ............ None
16651
+ pipeline_model_parallel_size .................... 1
16652
+ pipeline_model_parallel_split_rank .............. None
16653
+ position_embedding_type ......................... learned_absolute
16654
+ pretrained_checkpoint ........................... None
16655
+ profile ......................................... False
16656
+ profile_ranks ................................... [0]
16657
+ profile_step_end ................................ 12
16658
+ profile_step_start .............................. 10
16659
+ q_lora_rank ..................................... None
16660
+ qk_head_dim ..................................... 128
16661
+ qk_l2_norm ...................................... False
16662
+ qk_layernorm .................................... False
16663
+ qk_pos_emb_head_dim ............................. 64
16664
+ query_in_block_prob ............................. 0.1
16665
+ rampup_batch_size ............................... None
16666
+ rank ............................................ 0
16667
+ recompute_granularity ........................... None
16668
+ recompute_method ................................ None
16669
+ recompute_modules ............................... None
16670
+ recompute_num_layers ............................ None
16671
+ record_memory_history ........................... False
16672
+ relative_attention_max_distance ................. 128
16673
+ relative_attention_num_buckets .................. 32
16674
+ replication ..................................... False
16675
+ replication_factor .............................. 2
16676
+ replication_jump ................................ None
16677
+ rerun_mode ...................................... disabled
16678
+ reset_attention_mask ............................ False
16679
+ reset_position_ids .............................. False
16680
+ result_rejected_tracker_filename ................ None
16681
+ retriever_report_topk_accuracies ................ []
16682
+ retriever_score_scaling ......................... False
16683
+ retriever_seq_length ............................ 256
16684
+ retro_add_retriever ............................. False
16685
+ retro_attention_gate ............................ 1
16686
+ retro_cyclic_train_iters ........................ None
16687
+ retro_encoder_attention_dropout ................. 0.1
16688
+ retro_encoder_hidden_dropout .................... 0.1
16689
+ retro_encoder_layers ............................ 2
16690
+ retro_num_neighbors ............................. 2
16691
+ retro_num_retrieved_chunks ...................... 2
16692
+ retro_project_dir ............................... None
16693
+ retro_verify_neighbor_count ..................... True
16694
+ rope_scaling_factor ............................. 8.0
16695
+ rotary_base ..................................... 10000
16696
+ rotary_interleaved .............................. False
16697
+ rotary_percent .................................. 1.0
16698
+ rotary_scaling_factor ........................... 1.0
16699
+ rotary_seq_len_interpolation_factor ............. None
16700
+ run_workload_inspector_server ................... False
16701
+ sample_rate ..................................... 1.0
16702
+ save ............................................ gpt-checkpoint
16703
+ save_interval ................................... 16
16704
+ scatter_gather_tensors_in_pipeline .............. True
16705
+ seed ............................................ 1234
16706
+ seq_length ...................................... 24576
16707
+ sequence_parallel ............................... False
16708
+ sgd_momentum .................................... 0.9
16709
+ short_seq_prob .................................. 0.1
16710
+ skip_train ...................................... False
16711
+ skipped_train_samples ........................... 0
16712
+ spec ............................................ None
16713
+ split ........................................... None
16714
+ squared_relu .................................... False
16715
+ start_weight_decay .............................. 0.1
16716
+ straggler_ctrlr_port ............................ 65535
16717
+ straggler_minmax_count .......................... 1
16718
+ suggested_communication_unit_size ............... None
16719
+ swiglu .......................................... False
16720
+ swin_backbone_type .............................. tiny
16721
+ symmetric_ar_type ............................... None
16722
+ te_rng_tracker .................................. False
16723
+ tensor_model_parallel_size ...................... 8
16724
+ tensorboard_dir ................................. tensorboard-logs/
16725
+ tensorboard_log_interval ........................ 1
16726
+ tensorboard_queue_size .......................... 1000
16727
+ test_data_path .................................. None
16728
+ test_mode ....................................... False
16729
+ tiktoken_num_special_tokens ..................... 1000
16730
+ tiktoken_pattern ................................ None
16731
+ tiktoken_special_tokens ......................... None
16732
+ timing_log_level ................................ 0
16733
+ timing_log_option ............................... minmax
16734
+ titles_data_path ................................ None
16735
+ tokenizer_model ................................. None
16736
+ tokenizer_type .................................. GPT2BPETokenizer
16737
+ torch_fsdp2_reshard_after_forward ............... True
16738
+ tp_comm_bootstrap_backend ....................... nccl
16739
+ tp_comm_bulk_dgrad .............................. True
16740
+ tp_comm_bulk_wgrad .............................. True
16741
+ tp_comm_overlap ................................. False
16742
+ tp_comm_overlap_ag .............................. True
16743
+ tp_comm_overlap_cfg ............................. None
16744
+ tp_comm_overlap_rs .............................. True
16745
+ tp_comm_overlap_rs_dgrad ........................ False
16746
+ tp_comm_split_ag ................................ True
16747
+ tp_comm_split_rs ................................ True
16748
+ train_data_path ................................. None
16749
+ train_iters ..................................... 10
16750
+ train_samples ................................... None
16751
+ train_sync_interval ............................. None
16752
+ transformer_impl ................................ transformer_engine
16753
+ transformer_pipeline_model_parallel_size ........ 1
16754
+ untie_embeddings_and_output_weights ............. False
16755
+ use_checkpoint_args ............................. False
16756
+ use_checkpoint_opt_param_scheduler .............. False
16757
+ use_cpu_initialization .......................... None
16758
+ use_custom_fsdp ................................. False
16759
+ use_dist_ckpt ................................... True
16760
+ use_dist_ckpt_deprecated ........................ False
16761
+ use_distributed_optimizer ....................... False
16762
+ use_flash_attn .................................. False
16763
+ use_legacy_models ............................... False
16764
+ use_mp_args_from_checkpoint_args ................ False
16765
+ use_one_sent_docs ............................... False
16766
+ use_persistent_ckpt_worker ...................... False
16767
+ use_precision_aware_optimizer ................... False
16768
+ use_pytorch_profiler ............................ False
16769
+ use_ring_exchange_p2p ........................... False
16770
+ use_rope_scaling ................................ False
16771
+ use_rotary_position_embeddings .................. False
16772
+ use_sharp ....................................... False
16773
+ use_tokenizer_model_from_checkpoint_args ........ True
16774
+ use_torch_fsdp2 ................................. False
16775
+ use_torch_optimizer_for_cpu_offload ............. False
16776
+ use_tp_pp_dp_mapping ............................ False
16777
+ v_head_dim ...................................... 128
16778
+ valid_data_path ................................. None
16779
+ variable_seq_lengths ............................ False
16780
+ virtual_pipeline_model_parallel_size ............ None
16781
+ vision_backbone_type ............................ vit
16782
+ vision_pretraining .............................. False
16783
+ vision_pretraining_type ......................... classify
16784
+ vocab_extra_ids ................................. 0
16785
+ vocab_file ...................................... vocab.json
16786
+ vocab_size ...................................... None
16787
+ wandb_exp_name ..................................
16788
+ wandb_project ...................................
16789
+ wandb_save_dir ..................................
16790
+ weight_decay .................................... 0.1
16791
+ weight_decay_incr_style ......................... constant
16792
+ wgrad_deferral_limit ............................ 0
16793
+ world_size ...................................... 64
16794
+ yaml_cfg ........................................ None
16795
+ -------------------- end of arguments ---------------------
16796
+ INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
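The constant value of 1 follows from the batch-size arguments listed above; a small illustrative check using the standard relation between global batch size, micro batch size, and data-parallel size:

    global_batch_size, micro_batch_size, data_parallel_size = 1, 1, 1                  # from the argument dump above
    num_microbatches = global_batch_size // (micro_batch_size * data_parallel_size)    # -> 1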
16797
+ > building GPT2BPETokenizer tokenizer ...
16798
+ INFO:megatron.training.initialize:Setting logging level to 0
16799
+ INFO:megatron.training.initialize:Setting logging level to 0
16800
+ INFO:megatron.training.initialize:Setting logging level to 0
16801
+ INFO:megatron.training.initialize:Setting logging level to 0
16802
+ INFO:megatron.training.initialize:Setting logging level to 0
16803
+ INFO:megatron.training.initialize:Setting logging level to 0
16804
+ INFO:megatron.training.initialize:Setting logging level to 0
16805
+ INFO:megatron.training.initialize:Setting logging level to 0
16806
+ INFO:megatron.training.initialize:Setting logging level to 0
16807
+ > padded vocab (size: 50257) with 943 dummy tokens (new size: 51200)
16808
+ INFO:megatron.training.initialize:Setting logging level to 0
16809
+ WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
16810
+ > initializing torch distributed ...
16811
+ INFO:megatron.training.initialize:Setting logging level to 0
16812
+ INFO:megatron.training.initialize:Setting logging level to 0
16813
+ INFO:megatron.training.initialize:Setting logging level to 0
16814
+ INFO:megatron.training.initialize:Setting logging level to 0
16815
+ INFO:megatron.training.initialize:Setting logging level to 0
16816
+ INFO:megatron.training.initialize:Setting logging level to 0
16817
+ INFO:megatron.training.initialize:Setting logging level to 0
16818
+ INFO:megatron.training.initialize:Setting logging level to 0
16819
+ INFO:megatron.training.initialize:Setting logging level to 0
16820
+ INFO:megatron.training.initialize:Setting logging level to 0
16821
+ INFO:megatron.training.initialize:Setting logging level to 0
16822
+ INFO:megatron.training.initialize:Setting logging level to 0
16823
+ INFO:megatron.training.initialize:Setting logging level to 0
16824
+ INFO:megatron.training.initialize:Setting logging level to 0
16825
+ INFO:megatron.training.initialize:Setting logging level to 0
16826
+ INFO:megatron.training.initialize:Setting logging level to 0
16827
+ INFO:megatron.training.initialize:Setting logging level to 0
16828
+ INFO:megatron.training.initialize:Setting logging level to 0
16829
+ INFO:megatron.training.initialize:Setting logging level to 0
16830
+ INFO:megatron.training.initialize:Setting logging level to 0
16831
+ INFO:megatron.training.initialize:Setting logging level to 0
16832
+ INFO:megatron.training.initialize:Setting logging level to 0
16833
+ > initialized tensor model parallel with size 8
16834
+ > initialized pipeline model parallel with size 1
16835
+ > setting random seeds to 1234 ...
16836
+ > compiling dataset index builder ...
16837
+ make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
16838
+ make: Nothing to be done for 'default'.
16839
+ make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
16840
+ >>> done with dataset index builder. Compilation time: 0.044 seconds
16841
+ WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
16842
+ > compiling and loading fused kernels ...
16843
+ >>> done with compiling and loading fused kernels. Compilation time: 4.149 seconds
attnserver.run_attnserver.slurm.sh.343195.out.log CHANGED
@@ -63637,3 +63637,296 @@ batch tensor after cp: labels torch.Size([1, 24576])
63637
  batch tensor after cp: loss_mask torch.Size([1, 24576])
63638
  batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63639
  batch tensor after cp: position_ids torch.Size([1, 24576])
63640
+ batch tensor: tokens torch.Size([1, 98304])
63641
+ batch tensor: labels torch.Size([1, 98304])
63642
+ batch tensor: loss_mask torch.Size([1, 98304])
63643
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63644
+ batch tensor: position_ids torch.Size([1, 98304])
63645
+ batch tensor after cp: tokens torch.Size([1, 24576])
63646
+ batch tensor after cp: labels torch.Size([1, 24576])
63647
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63648
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63649
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63650
+ batch tensor: tokens torch.Size([1, 98304])
63651
+ batch tensor: labels torch.Size([1, 98304])
63652
+ batch tensor: loss_mask torch.Size([1, 98304])
63653
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63654
+ batch tensor: position_ids torch.Size([1, 98304])
63655
+ batch tensor after cp: tokens torch.Size([1, 24576])
63656
+ batch tensor after cp: labels torch.Size([1, 24576])
63657
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63658
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63659
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63660
+ batch tensor: tokens torch.Size([1, 98304])
63661
+ batch tensor: labels torch.Size([1, 98304])
63662
+ batch tensor: loss_mask torch.Size([1, 98304])
63663
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63664
+ batch tensor: position_ids torch.Size([1, 98304])
63665
+ batch tensor after cp: tokens torch.Size([1, 24576])
63666
+ batch tensor after cp: labels torch.Size([1, 24576])
63667
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63668
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63669
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63670
+ batch tensor: tokens torch.Size([1, 98304])
63671
+ batch tensor: labels torch.Size([1, 98304])
63672
+ batch tensor: loss_mask torch.Size([1, 98304])
63673
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63674
+ batch tensor: position_ids torch.Size([1, 98304])
63675
+ batch tensor after cp: tokens torch.Size([1, 24576])
63676
+ batch tensor after cp: labels torch.Size([1, 24576])
63677
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63678
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63679
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63680
+ batch tensor: tokens torch.Size([1, 98304])
63681
+ batch tensor: labels torch.Size([1, 98304])
63682
+ batch tensor: loss_mask torch.Size([1, 98304])
63683
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63684
+ batch tensor: position_ids torch.Size([1, 98304])
63685
+ batch tensor after cp: tokens torch.Size([1, 24576])
63686
+ batch tensor after cp: labels torch.Size([1, 24576])
63687
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63688
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63689
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63690
+ batch tensor: tokens torch.Size([1, 98304])
63691
+ batch tensor: labels torch.Size([1, 98304])
63692
+ batch tensor: loss_mask torch.Size([1, 98304])
63693
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63694
+ batch tensor: position_ids torch.Size([1, 98304])
63695
+ batch tensor after cp: tokens torch.Size([1, 24576])
63696
+ batch tensor after cp: labels torch.Size([1, 24576])
63697
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63698
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63699
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63700
+ batch tensor: tokens torch.Size([1, 98304])
63701
+ batch tensor: labels torch.Size([1, 98304])
63702
+ batch tensor: loss_mask torch.Size([1, 98304])
63703
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63704
+ batch tensor: position_ids torch.Size([1, 98304])
63705
+ batch tensor after cp: tokens torch.Size([1, 24576])
63706
+ batch tensor after cp: labels torch.Size([1, 24576])
63707
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63708
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63709
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63710
+ batch tensor: tokens torch.Size([1, 98304])
63711
+ batch tensor: labels torch.Size([1, 98304])
63712
+ batch tensor: loss_mask torch.Size([1, 98304])
63713
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63714
+ batch tensor: position_ids torch.Size([1, 98304])
63715
+ batch tensor after cp: tokens torch.Size([1, 24576])
63716
+ batch tensor after cp: labels torch.Size([1, 24576])
63717
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63718
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63719
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63720
+ batch tensor: tokens torch.Size([1, 98304])
63721
+ batch tensor: labels torch.Size([1, 98304])
63722
+ batch tensor: loss_mask torch.Size([1, 98304])
63723
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63724
+ batch tensor: position_ids torch.Size([1, 98304])
63725
+ batch tensor after cp: tokens torch.Size([1, 24576])
63726
+ batch tensor after cp: labels torch.Size([1, 24576])
63727
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63728
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63729
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63730
+ batch tensor: tokens torch.Size([1, 98304])
63731
+ batch tensor: labels torch.Size([1, 98304])
63732
+ batch tensor: loss_mask torch.Size([1, 98304])
63733
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63734
+ batch tensor: position_ids torch.Size([1, 98304])
63735
+ batch tensor after cp: tokens torch.Size([1, 24576])
63736
+ batch tensor after cp: labels torch.Size([1, 24576])
63737
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63738
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63739
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63740
+ batch tensor: tokens torch.Size([1, 98304])
63741
+ batch tensor: labels torch.Size([1, 98304])
63742
+ batch tensor: loss_mask torch.Size([1, 98304])
63743
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63744
+ batch tensor: position_ids torch.Size([1, 98304])
63745
+ batch tensor after cp: tokens torch.Size([1, 24576])
63746
+ batch tensor after cp: labels torch.Size([1, 24576])
63747
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63748
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63749
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63750
+ batch tensor: tokens torch.Size([1, 98304])
63751
+ batch tensor: labels torch.Size([1, 98304])
63752
+ batch tensor: loss_mask torch.Size([1, 98304])
63753
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63754
+ batch tensor: position_ids torch.Size([1, 98304])
63755
+ batch tensor after cp: tokens torch.Size([1, 24576])
63756
+ batch tensor after cp: labels torch.Size([1, 24576])
63757
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63758
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63759
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63760
+ batch tensor: tokens torch.Size([1, 98304])
63761
+ batch tensor: labels torch.Size([1, 98304])
63762
+ batch tensor: loss_mask torch.Size([1, 98304])
63763
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63764
+ batch tensor: position_ids torch.Size([1, 98304])
63765
+ batch tensor after cp: tokens torch.Size([1, 24576])
63766
+ batch tensor after cp: labels torch.Size([1, 24576])
63767
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63768
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63769
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63770
+ batch tensor: tokens torch.Size([1, 98304])
63771
+ batch tensor: labels torch.Size([1, 98304])
63772
+ batch tensor: loss_mask torch.Size([1, 98304])
63773
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63774
+ batch tensor: position_ids torch.Size([1, 98304])
63775
+ batch tensor after cp: tokens torch.Size([1, 24576])
63776
+ batch tensor after cp: labels torch.Size([1, 24576])
63777
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63778
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63779
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63780
+ batch tensor: tokens torch.Size([1, 98304])
63781
+ batch tensor: labels torch.Size([1, 98304])
63782
+ batch tensor: loss_mask torch.Size([1, 98304])
63783
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63784
+ batch tensor: position_ids torch.Size([1, 98304])
63785
+ batch tensor after cp: tokens torch.Size([1, 24576])
63786
+ batch tensor after cp: labels torch.Size([1, 24576])
63787
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63788
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63789
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63790
+ batch tensor: tokens torch.Size([1, 98304])
63791
+ batch tensor: labels torch.Size([1, 98304])
63792
+ batch tensor: loss_mask torch.Size([1, 98304])
63793
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63794
+ batch tensor: position_ids torch.Size([1, 98304])
63795
+ batch tensor after cp: tokens torch.Size([1, 24576])
63796
+ batch tensor after cp: labels torch.Size([1, 24576])
63797
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63798
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63799
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63800
+ batch tensor: tokens torch.Size([1, 98304])
63801
+ batch tensor: labels torch.Size([1, 98304])
63802
+ batch tensor: loss_mask torch.Size([1, 98304])
63803
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63804
+ batch tensor: position_ids torch.Size([1, 98304])
63805
+ batch tensor after cp: tokens torch.Size([1, 24576])
63806
+ batch tensor after cp: labels torch.Size([1, 24576])
63807
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63808
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63809
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63810
+ batch tensor: tokens torch.Size([1, 98304])
63811
+ batch tensor: labels torch.Size([1, 98304])
63812
+ batch tensor: loss_mask torch.Size([1, 98304])
63813
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63814
+ batch tensor: position_ids torch.Size([1, 98304])
63815
+ batch tensor after cp: tokens torch.Size([1, 24576])
63816
+ batch tensor after cp: labels torch.Size([1, 24576])
63817
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63818
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63819
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63820
+ batch tensor: tokens torch.Size([1, 98304])
63821
+ batch tensor: labels torch.Size([1, 98304])
63822
+ batch tensor: loss_mask torch.Size([1, 98304])
63823
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63824
+ batch tensor: position_ids torch.Size([1, 98304])
63825
+ batch tensor after cp: tokens torch.Size([1, 24576])
63826
+ batch tensor after cp: labels torch.Size([1, 24576])
63827
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63828
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63829
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63830
+ batch tensor: tokens torch.Size([1, 98304])
63831
+ batch tensor: labels torch.Size([1, 98304])
63832
+ batch tensor: loss_mask torch.Size([1, 98304])
63833
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63834
+ batch tensor: position_ids torch.Size([1, 98304])
63835
+ batch tensor after cp: tokens torch.Size([1, 24576])
63836
+ batch tensor after cp: labels torch.Size([1, 24576])
63837
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63838
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63839
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63840
+ batch tensor: tokens torch.Size([1, 98304])
63841
+ batch tensor: labels torch.Size([1, 98304])
63842
+ batch tensor: loss_mask torch.Size([1, 98304])
63843
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63844
+ batch tensor: position_ids torch.Size([1, 98304])
63845
+ batch tensor after cp: tokens torch.Size([1, 24576])
63846
+ batch tensor after cp: labels torch.Size([1, 24576])
63847
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63848
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63849
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63850
+ batch tensor: tokens torch.Size([1, 98304])
63851
+ batch tensor: labels torch.Size([1, 98304])
63852
+ batch tensor: loss_mask torch.Size([1, 98304])
63853
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63854
+ batch tensor: position_ids torch.Size([1, 98304])
63855
+ batch tensor after cp: tokens torch.Size([1, 24576])
63856
+ batch tensor after cp: labels torch.Size([1, 24576])
63857
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63858
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63859
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63860
+ batch tensor: tokens torch.Size([1, 98304])
63861
+ batch tensor: labels torch.Size([1, 98304])
63862
+ batch tensor: loss_mask torch.Size([1, 98304])
63863
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63864
+ batch tensor: position_ids torch.Size([1, 98304])
63865
+ batch tensor after cp: tokens torch.Size([1, 24576])
63866
+ batch tensor after cp: labels torch.Size([1, 24576])
63867
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63868
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63869
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63870
+ batch tensor: tokens torch.Size([1, 98304])
63871
+ batch tensor: labels torch.Size([1, 98304])
63872
+ batch tensor: loss_mask torch.Size([1, 98304])
63873
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63874
+ batch tensor: position_ids torch.Size([1, 98304])
63875
+ batch tensor after cp: tokens torch.Size([1, 24576])
63876
+ batch tensor after cp: labels torch.Size([1, 24576])
63877
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63878
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63879
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63880
+ batch tensor: tokens torch.Size([1, 98304])
63881
+ batch tensor: labels torch.Size([1, 98304])
63882
+ batch tensor: loss_mask torch.Size([1, 98304])
63883
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63884
+ batch tensor: position_ids torch.Size([1, 98304])
63885
+ batch tensor after cp: tokens torch.Size([1, 24576])
63886
+ batch tensor after cp: labels torch.Size([1, 24576])
63887
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63888
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63889
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63890
+ batch tensor: tokens torch.Size([1, 98304])
63891
+ batch tensor: labels torch.Size([1, 98304])
63892
+ batch tensor: loss_mask torch.Size([1, 98304])
63893
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63894
+ batch tensor: position_ids torch.Size([1, 98304])
63895
+ batch tensor after cp: tokens torch.Size([1, 24576])
63896
+ batch tensor after cp: labels torch.Size([1, 24576])
63897
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63898
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63899
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63900
+ batch tensor: tokens torch.Size([1, 98304])
63901
+ batch tensor: labels torch.Size([1, 98304])
63902
+ batch tensor: loss_mask torch.Size([1, 98304])
63903
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63904
+ batch tensor: position_ids torch.Size([1, 98304])
63905
+ batch tensor after cp: tokens torch.Size([1, 24576])
63906
+ batch tensor after cp: labels torch.Size([1, 24576])
63907
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63908
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63909
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63910
+ batch tensor: tokens torch.Size([1, 98304])
63911
+ batch tensor: labels torch.Size([1, 98304])
63912
+ batch tensor: loss_mask torch.Size([1, 98304])
63913
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63914
+ batch tensor: position_ids torch.Size([1, 98304])
63915
+ batch tensor after cp: tokens torch.Size([1, 24576])
63916
+ batch tensor after cp: labels torch.Size([1, 24576])
63917
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63918
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63919
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63920
+ batch tensor: tokens torch.Size([1, 98304])
63921
+ batch tensor: labels torch.Size([1, 98304])
63922
+ batch tensor: loss_mask torch.Size([1, 98304])
63923
+ batch tensor: attention_mask torch.Size([1, 1, 98304, 98304])
63924
+ batch tensor: position_ids torch.Size([1, 98304])
63925
+ batch tensor after cp: tokens torch.Size([1, 24576])
63926
+ batch tensor after cp: labels torch.Size([1, 24576])
63927
+ batch tensor after cp: loss_mask torch.Size([1, 24576])
63928
+ batch tensor after cp: attention_mask torch.Size([1, 1, 24576, 98304])
63929
+ batch tensor after cp: position_ids torch.Size([1, 24576])
63930
+ Start exporting trace 7
63931
+ Done exporting trace 7
63932
+ [2025-06-21 20:56:51] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 82575.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
attnserver.run_attnserver.slurm.sh.343196.err.log CHANGED
@@ -806,3 +806,330 @@ W0621 20:55:15.199000 1284949 site-packages/torch/distributed/run.py:766] ******
806
  warnings.warn(
807
  /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
808
  warnings.warn(
809
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
810
+ warnings.warn(
811
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
812
+ warnings.warn(
813
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
814
+ warnings.warn(
815
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
816
+ warnings.warn(
817
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
818
+ warnings.warn(
819
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
820
+ warnings.warn(
821
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
822
+ warnings.warn(
823
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
824
+ warnings.warn(
825
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
826
+ warnings.warn(
827
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
828
+ warnings.warn(
829
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
830
+ warnings.warn(
831
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
832
+ warnings.warn(
833
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
834
+ warnings.warn(
835
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
836
+ warnings.warn(
837
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
838
+ warnings.warn(
839
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
840
+ warnings.warn(
841
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
842
+ warnings.warn(
843
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
844
+ warnings.warn(
845
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
846
+ warnings.warn(
847
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
848
+ warnings.warn(
849
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
850
+ warnings.warn(
851
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
852
+ warnings.warn(
853
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
854
+ warnings.warn(
855
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
856
+ warnings.warn(
857
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
858
+ warnings.warn(
859
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
860
+ warnings.warn(
861
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
862
+ warnings.warn(
863
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
864
+ warnings.warn(
865
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
866
+ warnings.warn(
867
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
868
+ warnings.warn(
869
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
870
+ warnings.warn(
871
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
872
+ warnings.warn(
873
+ [rank3]:[W621 20:56:10.109828052 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
874
+ [rank0]:[W621 20:56:10.156178280 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
875
+ [rank7]:[W621 20:56:10.170832667 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
876
+ [rank5]:[W621 20:56:10.173437427 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
877
+ [rank6]:[W621 20:56:10.300539223 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
878
+ [rank2]:[W621 20:56:10.301045424 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
879
+ [rank1]:[W621 20:56:10.503819028 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
880
+ [rank4]:[W621 20:56:10.537548726 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
881
+ [rank29]:[W621 20:56:10.130125578 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
882
+ [rank22]:[W621 20:56:10.060211900 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
883
+ [rank14]:[W621 20:56:10.971555579 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
884
+ [rank10]:[W621 20:56:10.972319312 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
885
+ [rank16]:[W621 20:56:10.065463051 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
886
+ [rank23]:[W621 20:56:10.072383187 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
887
+ [rank8]:[W621 20:56:10.984408490 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
888
+ [rank27]:[W621 20:56:10.163000675 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
889
+ [rank26]:[W621 20:56:10.171730969 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
890
+ [rank20]:[W621 20:56:11.096806608 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
891
+ [rank11]:[W621 20:56:11.014185267 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
892
+ [rank31]:[W621 20:56:11.210516452 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
893
+ [rank15]:[W621 20:56:11.061740494 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
894
+ [rank12]:[W621 20:56:11.126337700 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
895
+ [rank17]:[W621 20:56:11.222908099 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
896
+ [rank24]:[W621 20:56:11.334655579 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
897
+ [rank25]:[W621 20:56:11.361141535 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
898
+ [rank18]:[W621 20:56:11.285665594 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
899
+ [rank9]:[W621 20:56:11.192108375 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
900
+ [rank30]:[W621 20:56:11.372685394 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
901
+ [rank19]:[W621 20:56:11.306940243 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
902
+ [rank21]:[W621 20:56:11.334771504 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
903
+ [rank28]:[W621 20:56:11.456625339 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
904
+ [rank13]:[W621 20:56:11.334129354 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
905
+ + set +x
906
+ + set +x
907
+ + set +x
908
+ + set +x
909
+ + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
910
+ + export PROF_CTX_LENGTH=8192
911
+ + PROF_CTX_LENGTH=8192
912
+ + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs2.json'
913
+ + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs2.json' ']'
914
+ + echo 'Running ctx_length=8192, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=2'
915
+ + srun bash ./attnserver.sh
916
+ + which python3
917
+ + which python3
918
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343196 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-184:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
919
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343196 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-184:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
920
+ + which python3
921
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343196 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-184:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
922
+ + which python3
923
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343196 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-184:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
924
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
925
+ and will be removed in future. Use torchrun.
926
+ Note that --use-env is set by default in torchrun.
927
+ If your script expects `--local-rank` argument to be set, please
928
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
929
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
930
+ further instructions
931
+
932
+ main()
933
+ W0621 20:56:17.570000 1962565 site-packages/torch/distributed/run.py:766]
934
+ W0621 20:56:17.570000 1962565 site-packages/torch/distributed/run.py:766] *****************************************
935
+ W0621 20:56:17.570000 1962565 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
936
+ W0621 20:56:17.570000 1962565 site-packages/torch/distributed/run.py:766] *****************************************
937
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
938
+ and will be removed in future. Use torchrun.
939
+ Note that --use-env is set by default in torchrun.
940
+ If your script expects `--local-rank` argument to be set, please
941
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
942
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
943
+ further instructions
944
+
945
+ main()
946
+ W0621 20:56:17.620000 1490441 site-packages/torch/distributed/run.py:766]
947
+ W0621 20:56:17.620000 1490441 site-packages/torch/distributed/run.py:766] *****************************************
948
+ W0621 20:56:17.620000 1490441 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
949
+ W0621 20:56:17.620000 1490441 site-packages/torch/distributed/run.py:766] *****************************************
950
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
951
+ and will be removed in future. Use torchrun.
952
+ Note that --use-env is set by default in torchrun.
953
+ If your script expects `--local-rank` argument to be set, please
954
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
955
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
956
+ further instructions
957
+
958
+ main()
959
+ W0621 20:56:17.636000 1146683 site-packages/torch/distributed/run.py:766]
960
+ W0621 20:56:17.636000 1146683 site-packages/torch/distributed/run.py:766] *****************************************
961
+ W0621 20:56:17.636000 1146683 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
962
+ W0621 20:56:17.636000 1146683 site-packages/torch/distributed/run.py:766] *****************************************
963
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
964
+ and will be removed in future. Use torchrun.
965
+ Note that --use-env is set by default in torchrun.
966
+ If your script expects `--local-rank` argument to be set, please
967
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
968
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
969
+ further instructions
970
+
971
+ main()
972
+ W0621 20:56:17.638000 1288315 site-packages/torch/distributed/run.py:766]
973
+ W0621 20:56:17.638000 1288315 site-packages/torch/distributed/run.py:766] *****************************************
974
+ W0621 20:56:17.638000 1288315 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
975
+ W0621 20:56:17.638000 1288315 site-packages/torch/distributed/run.py:766] *****************************************
976
+ [rank8]:[W621 20:56:41.165572525 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
977
+ [rank24]:[W621 20:56:41.339421561 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
978
+ [rank0]:[W621 20:56:41.987211777 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
979
+ [rank7]:[W621 20:56:41.278598176 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
980
+ [rank31]:[W621 20:56:41.662173038 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
981
+ [rank23]:[W621 20:56:41.584657731 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
982
+ [rank16]:[W621 20:56:41.584720704 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
983
+ [rank15]:[W621 20:56:41.496165168 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
984
+ [rank19]:[W621 20:56:41.592380746 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
985
+ [rank3]:[W621 20:56:41.287152225 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
986
+ [rank11]:[W621 20:56:41.501514929 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
987
+ [rank27]:[W621 20:56:41.671927347 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
988
+ [rank12]:[W621 20:56:41.514036880 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
989
+ [rank4]:[W621 20:56:41.300496290 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
990
+ [rank20]:[W621 20:56:41.606207328 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
991
+ [rank28]:[W621 20:56:41.683874823 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
992
+ [rank6]:[W621 20:56:41.300635069 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
993
+ [rank30]:[W621 20:56:41.684486488 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
994
+ [rank22]:[W621 20:56:41.607879281 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
995
+ [rank1]:[W621 20:56:41.303601782 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
996
+ [rank2]:[W621 20:56:41.303680571 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
997
+ [rank14]:[W621 20:56:41.519501563 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
998
+ [rank17]:[W621 20:56:41.611275293 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
999
+ [rank10]:[W621 20:56:41.519807627 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1000
+ [rank9]:[W621 20:56:41.519995140 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1001
+ [rank25]:[W621 20:56:41.692623467 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1002
+ [rank5]:[W621 20:56:41.310171177 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1003
+ [rank18]:[W621 20:56:41.616387975 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1004
+ [rank13]:[W621 20:56:41.525819684 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1005
+ [rank26]:[W621 20:56:41.694656730 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1006
+ [rank29]:[W621 20:56:41.696301814 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1007
+ [rank21]:[W621 20:56:41.618949178 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1008
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1009
+ warnings.warn(
1010
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1011
+ warnings.warn(
1012
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1013
+ warnings.warn(
1014
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1015
+ warnings.warn(
1016
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1017
+ warnings.warn(
1018
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1019
+ warnings.warn(
1020
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1021
+ warnings.warn(
1022
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1023
+ warnings.warn(
1024
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1025
+ warnings.warn(
1026
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1027
+ warnings.warn(
1028
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1029
+ warnings.warn(
1030
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1031
+ warnings.warn(
1032
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1033
+ warnings.warn(
1034
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1035
+ warnings.warn(
1036
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1037
+ warnings.warn(
1038
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1039
+ warnings.warn(
1040
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1041
+ warnings.warn(
1042
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1043
+ warnings.warn(
1044
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1045
+ warnings.warn(
1046
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1047
+ warnings.warn(
1048
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1049
+ warnings.warn(
1050
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1051
+ warnings.warn(
1052
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1053
+ warnings.warn(
1054
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1055
+ warnings.warn(
1056
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1057
+ warnings.warn(
1058
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1059
+ warnings.warn(
1060
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1061
+ warnings.warn(
1062
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1063
+ warnings.warn(
1064
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1065
+ warnings.warn(
1066
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1067
+ warnings.warn(
1068
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1069
+ warnings.warn(
1070
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1071
+ warnings.warn(
1072
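+ [note] The repeated UserWarning above comes from passing the deprecated fp8 argument to get_gpt_layer_with_transformer_engine_spec(); per the message, the fix is simply to stop passing it. A hedged sketch (any other arguments, and where FP8 gets configured instead, are assumptions):
+
+     from megatron.core.models.gpt.gpt_layer_specs import (
+         get_gpt_layer_with_transformer_engine_spec,
+     )
+
+     # before (warns): get_gpt_layer_with_transformer_engine_spec(fp8="hybrid")
+     layer_spec = get_gpt_layer_with_transformer_engine_spec()  # omit fp8; configure FP8 via the transformer config instead (assumption)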
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1073
+ warnings.warn(
1074
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1075
+ warnings.warn(
1076
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1077
+ warnings.warn(
1078
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1079
+ warnings.warn(
1080
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1081
+ warnings.warn(
1082
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1083
+ warnings.warn(
1084
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1085
+ warnings.warn(
1086
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1087
+ warnings.warn(
1088
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1089
+ warnings.warn(
1090
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1091
+ warnings.warn(
1092
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1093
+ warnings.warn(
1094
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1095
+ warnings.warn(
1096
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1097
+ warnings.warn(
1098
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1099
+ warnings.warn(
1100
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1101
+ warnings.warn(
1102
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1103
+ warnings.warn(
1104
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1105
+ warnings.warn(
1106
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1107
+ warnings.warn(
1108
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1109
+ warnings.warn(
1110
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1111
+ warnings.warn(
1112
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1113
+ warnings.warn(
1114
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1115
+ warnings.warn(
1116
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1117
+ warnings.warn(
1118
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1119
+ warnings.warn(
1120
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1121
+ warnings.warn(
1122
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1123
+ warnings.warn(
1124
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1125
+ warnings.warn(
1126
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1127
+ warnings.warn(
1128
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1129
+ warnings.warn(
1130
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1131
+ warnings.warn(
1132
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1133
+ warnings.warn(
1134
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1135
+ warnings.warn(
attnserver.run_attnserver.slurm.sh.343196.out.log CHANGED
The diff for this file is too large to render. See raw diff
 
attnserver.run_attnserver.slurm.sh.343197.err.log CHANGED
@@ -741,3 +741,395 @@ W0621 20:55:24.659000 90607 site-packages/torch/distributed/run.py:766] ********
741
  [rank18]:[W621 20:55:48.526385627 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
742
  [rank10]:[W621 20:55:48.074834524 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
743
  [rank26]:[W621 20:55:48.597233459 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
744
+ [rank11]:[W621 20:55:48.082992669 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
745
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
746
+ warnings.warn(
747
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
748
+ warnings.warn(
749
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
750
+ warnings.warn(
751
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
752
+ warnings.warn(
753
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
754
+ warnings.warn(
755
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
756
+ warnings.warn(
757
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
758
+ warnings.warn(
759
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
760
+ warnings.warn(
761
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
762
+ warnings.warn(
763
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
764
+ warnings.warn(
765
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
766
+ warnings.warn(
767
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
768
+ warnings.warn(
769
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
770
+ warnings.warn(
771
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
772
+ warnings.warn(
773
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
774
+ warnings.warn(
775
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
776
+ warnings.warn(
777
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
778
+ warnings.warn(
779
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
780
+ warnings.warn(
781
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
782
+ warnings.warn(
783
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
784
+ warnings.warn(
785
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
786
+ warnings.warn(
787
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
788
+ warnings.warn(
789
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
790
+ warnings.warn(
791
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
792
+ warnings.warn(
793
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
794
+ warnings.warn(
795
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
796
+ warnings.warn(
797
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
798
+ warnings.warn(
799
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
800
+ warnings.warn(
801
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
802
+ warnings.warn(
803
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
804
+ warnings.warn(
805
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
806
+ warnings.warn(
807
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
808
+ warnings.warn(
809
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
810
+ warnings.warn(
811
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
812
+ warnings.warn(
813
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
814
+ warnings.warn(
815
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
816
+ warnings.warn(
817
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
818
+ warnings.warn(
819
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
820
+ warnings.warn(
821
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
822
+ warnings.warn(
823
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
824
+ warnings.warn(
825
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
826
+ warnings.warn(
827
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
828
+ warnings.warn(
829
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
830
+ warnings.warn(
831
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
832
+ warnings.warn(
833
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
834
+ warnings.warn(
835
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
836
+ warnings.warn(
837
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
838
+ warnings.warn(
839
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
840
+ warnings.warn(
841
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
842
+ warnings.warn(
843
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
844
+ warnings.warn(
845
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
846
+ warnings.warn(
847
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
848
+ warnings.warn(
849
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
850
+ warnings.warn(
851
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
852
+ warnings.warn(
853
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
854
+ warnings.warn(
855
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
856
+ warnings.warn(
857
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
858
+ warnings.warn(
859
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
860
+ warnings.warn(
861
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
862
+ warnings.warn(
863
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
864
+ warnings.warn(
865
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
866
+ warnings.warn(
867
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
868
+ warnings.warn(
869
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
870
+ warnings.warn(
871
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
872
+ warnings.warn(
873
+ [rank2]:[W621 20:56:17.833210784 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
874
+ [rank3]:[W621 20:56:17.919155879 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
875
+ [rank0]:[W621 20:56:18.082886191 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
876
+ [rank7]:[W621 20:56:18.176335022 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
877
+ [rank5]:[W621 20:56:18.195628372 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
878
+ [rank1]:[W621 20:56:18.271562707 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
879
+ [rank4]:[W621 20:56:18.284545199 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
880
+ [rank6]:[W621 20:56:18.285067581 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
881
+ [rank15]:[W621 20:56:18.532709993 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
882
+ [rank20]:[W621 20:56:18.040747600 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
883
+ [rank9]:[W621 20:56:18.595604277 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
884
+ [rank8]:[W621 20:56:18.625422273 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
885
+ [rank17]:[W621 20:56:18.079012598 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
886
+ [rank24]:[W621 20:56:18.156616486 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
887
+ [rank31]:[W621 20:56:18.164988413 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
888
+ [rank13]:[W621 20:56:18.647496686 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
889
+ [rank23]:[W621 20:56:18.106595365 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
890
+ [rank27]:[W621 20:56:18.177668560 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
891
+ [rank30]:[W621 20:56:18.181180272 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
892
+ [rank12]:[W621 20:56:18.668488847 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
893
+ [rank14]:[W621 20:56:18.740423592 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
894
+ [rank29]:[W621 20:56:18.290116868 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
895
+ [rank25]:[W621 20:56:18.319021083 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
896
+ [rank11]:[W621 20:56:18.807381658 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
897
+ [rank19]:[W621 20:56:18.270534017 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
898
+ [rank10]:[W621 20:56:18.849345374 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
899
+ [rank22]:[W621 20:56:18.314325015 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
900
+ [rank16]:[W621 20:56:18.319878517 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
901
+ [rank21]:[W621 20:56:18.322660788 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
902
+ [rank18]:[W621 20:56:18.332722569 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
903
+ [rank26]:[W621 20:56:19.452088514 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
904
+ [rank28]:[W621 20:56:19.512417259 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
905
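+ [note] The shutdown warnings above say each rank exited without tearing down its process group. A minimal sketch of the suggested cleanup at the end of training (its placement inside pretrain_gpt_profile.py is an assumption):
+
+     import torch.distributed as dist
+
+     # ... training loop finishes ...
+     if dist.is_initialized():
+         dist.destroy_process_group()  # avoids the "destroy_process_group() was not called" warning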
+ + set +x
906
+ + set +x
907
+ + set +x
908
+ + set +x
909
+ + for ctx_length in 1024 2048 4096 8192 12288 16384 24576 32768 40960 49152 65536 81920 98304 131072
910
+ + export PROF_CTX_LENGTH=8192
911
+ + PROF_CTX_LENGTH=8192
912
+ + name='/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs4.json'
913
+ + '[' -f '/mnt/sharefs/users/hao.zhang/junda/megatron-prof-data--unstable-v5/mytrace.L8192*tp8.cp4.bs4.json' ']'
914
+ + echo 'Running ctx_length=8192, TP_SIZE=8, CP_SIZE=4, BATCH_SIZE=4'
915
+ + srun bash ./attnserver.sh
916
+ + which python3
917
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 3 --rdzv_id 343197 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
918
+ + which python3
919
+ + which python3
920
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 1 --rdzv_id 343197 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
921
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 2 --rdzv_id 343197 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
922
+ + which python3
923
+ + python3 -m torch.distributed.launch --nproc_per_node 8 --nnodes 4 --node_rank 0 --rdzv_id 343197 --rdzv_backend c10d --rdzv_endpoint fs-mbz-gpu-852:29500 ./pretrain_gpt_profile.py --tensor-model-parallel-size 8 --context-parallel-size 4 --num-layers 2 --hidden-size 4096 --num-attention-heads 64 --group-query-attention --num-query-groups 16 --seq-length 8192 --max-position-embeddings 8192 --micro-batch-size 1 --global-batch-size 1 --lr 0.0005 --train-iters 10 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 1 --save-interval 16 --eval-interval 16 --eval-iters 1 --vocab-file vocab.json --merge-file merges.txt --save gpt-checkpoint --load gpt-checkpoint --logging-level 0 --mock-data --tensorboard-dir tensorboard-logs/
924
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
925
+ and will be removed in future. Use torchrun.
926
+ Note that --use-env is set by default in torchrun.
927
+ If your script expects `--local-rank` argument to be set, please
928
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
929
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
930
+ further instructions
931
+
932
+ main()
933
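+ [note] The FutureWarning above asks launched scripts to stop reading the injected --local-rank argument and to take LOCAL_RANK from the environment instead (torchrun, and launch with --use-env, export it). A minimal sketch of that change (the argparse wiring is illustrative):
+
+     import os
+     import argparse
+
+     parser = argparse.ArgumentParser()
+     # old style: parser.add_argument("--local-rank", type=int)  # injected by torch.distributed.launch
+     args = parser.parse_args()
+
+     local_rank = int(os.environ.get("LOCAL_RANK", "0"))  # provided by torchrun / --use-env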
+ W0621 20:56:24.680000 2014302 site-packages/torch/distributed/run.py:766]
934
+ W0621 20:56:24.680000 2014302 site-packages/torch/distributed/run.py:766] *****************************************
935
+ W0621 20:56:24.680000 2014302 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
936
+ W0621 20:56:24.680000 2014302 site-packages/torch/distributed/run.py:766] *****************************************
937
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
938
+ and will be removed in future. Use torchrun.
939
+ Note that --use-env is set by default in torchrun.
940
+ If your script expects `--local-rank` argument to be set, please
941
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
942
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
943
+ further instructions
944
+
945
+ main()
946
+ W0621 20:56:24.686000 3313413 site-packages/torch/distributed/run.py:766]
947
+ W0621 20:56:24.686000 3313413 site-packages/torch/distributed/run.py:766] *****************************************
948
+ W0621 20:56:24.686000 3313413 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
949
+ W0621 20:56:24.686000 3313413 site-packages/torch/distributed/run.py:766] *****************************************
950
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
951
+ and will be removed in future. Use torchrun.
952
+ Note that --use-env is set by default in torchrun.
953
+ If your script expects `--local-rank` argument to be set, please
954
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
955
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
956
+ further instructions
957
+
958
+ main()
959
+ W0621 20:56:24.719000 3381855 site-packages/torch/distributed/run.py:766]
960
+ W0621 20:56:24.719000 3381855 site-packages/torch/distributed/run.py:766] *****************************************
961
+ W0621 20:56:24.719000 3381855 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
962
+ W0621 20:56:24.719000 3381855 site-packages/torch/distributed/run.py:766] *****************************************
963
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py:207: FutureWarning: The module torch.distributed.launch is deprecated
964
+ and will be removed in future. Use torchrun.
965
+ Note that --use-env is set by default in torchrun.
966
+ If your script expects `--local-rank` argument to be set, please
967
+ change it to read from `os.environ['LOCAL_RANK']` instead. See
968
+ https://pytorch.org/docs/stable/distributed.html#launch-utility for
969
+ further instructions
970
+
971
+ main()
972
+ W0621 20:56:24.773000 93763 site-packages/torch/distributed/run.py:766]
973
+ W0621 20:56:24.773000 93763 site-packages/torch/distributed/run.py:766] *****************************************
974
+ W0621 20:56:24.773000 93763 site-packages/torch/distributed/run.py:766] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
975
+ W0621 20:56:24.773000 93763 site-packages/torch/distributed/run.py:766] *****************************************
976
+ [rank16]:[W621 20:56:47.849025050 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 16] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
977
+ [rank8]:[W621 20:56:47.407945859 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 8] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
978
+ [rank0]:[W621 20:56:47.538697134 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 0] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
979
+ [rank24]:[W621 20:56:47.331969292 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 24] using GPU 0 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
980
+ [rank31]:[W621 20:56:47.354127408 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 31] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
981
+ [rank26]:[W621 20:56:47.355249776 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 26] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
982
+ [rank29]:[W621 20:56:47.357530084 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 29] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
983
+ [rank2]:[W621 20:56:47.950001962 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 2] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
984
+ [rank7]:[W621 20:56:47.950095575 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 7] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
985
+ [rank23]:[W621 20:56:47.290005553 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 23] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
986
+ [rank18]:[W621 20:56:47.290013965 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 18] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
987
+ [rank21]:[W621 20:56:47.290029202 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 21] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
988
+ [rank28]:[W621 20:56:47.358909564 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 28] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
989
+ [rank25]:[W621 20:56:47.359084205 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 25] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
990
+ [rank13]:[W621 20:56:47.839892571 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 13] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
991
+ [rank12]:[W621 20:56:47.839931960 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 12] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
992
+ [rank15]:[W621 20:56:47.840012767 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 15] using GPU 7 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
993
+ [rank4]:[W621 20:56:47.957666124 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 4] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
994
+ [rank1]:[W621 20:56:47.957968668 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 1] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
995
+ [rank17]:[W621 20:56:47.297662040 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 17] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
996
+ [rank9]:[W621 20:56:47.845455671 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 9] using GPU 1 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
997
+ [rank20]:[W621 20:56:47.298584785 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 20] using GPU 4 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
998
+ [rank10]:[W621 20:56:47.846037228 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 10] using GPU 2 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
999
+ [rank22]:[W621 20:56:47.298974670 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 22] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1000
+ [rank14]:[W621 20:56:47.847066889 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 14] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1001
+ [rank27]:[W621 20:56:47.372572128 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 27] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1002
+ [rank5]:[W621 20:56:47.965740978 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 5] using GPU 5 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1003
+ [rank6]:[W621 20:56:47.965775610 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 6] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1004
+ [rank30]:[W621 20:56:47.374060775 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 30] using GPU 6 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1005
+ [rank19]:[W621 20:56:47.305805439 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 19] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1006
+ [rank3]:[W621 20:56:47.966607106 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 3] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
1007
+ [rank11]:[W621 20:56:47.853956597 ProcessGroupNCCL.cpp:4715] [PG ID 0 PG GUID 0 Rank 11] using GPU 3 as device used by this process is currently unknown. This can potentially cause a hang if this rank to GPU mapping is incorrect. You can pecify device_id in init_process_group() to force use of a particular device.
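Each of the ProcessGroupNCCL warnings above can be addressed the way the message itself suggests: tell init_process_group which device the rank owns. A hedged sketch under the assumption of an NCCL backend and the usual LOCAL_RANK-to-GPU mapping (none of this is taken from the launcher used here):

    import os
    import torch
    import torch.distributed as dist

    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device("cuda", local_rank)
    torch.cuda.set_device(device)

    # Passing device_id pins the rank-to-GPU mapping up front, which removes the
    # "device used by this process is currently unknown" warning and the hang risk it notes.
    dist.init_process_group(backend="nccl", device_id=device)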
1008
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1009
+ warnings.warn(
1010
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1011
+ warnings.warn(
1012
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1013
+ warnings.warn(
1014
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1015
+ warnings.warn(
1016
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1017
+ warnings.warn(
1018
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1019
+ warnings.warn(
1020
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1021
+ warnings.warn(
1022
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1023
+ warnings.warn(
1024
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1025
+ warnings.warn(
1026
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1027
+ warnings.warn(
1028
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1029
+ warnings.warn(
1030
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1031
+ warnings.warn(
1032
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1033
+ warnings.warn(
1034
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1035
+ warnings.warn(
1036
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1037
+ warnings.warn(
1038
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1039
+ warnings.warn(
1040
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1041
+ warnings.warn(
1042
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1043
+ warnings.warn(
1044
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1045
+ warnings.warn(
1046
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1047
+ warnings.warn(
1048
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1049
+ warnings.warn(
1050
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1051
+ warnings.warn(
1052
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1053
+ warnings.warn(
1054
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1055
+ warnings.warn(
1056
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1057
+ warnings.warn(
1058
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1059
+ warnings.warn(
1060
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1061
+ warnings.warn(
1062
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1063
+ warnings.warn(
1064
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1065
+ warnings.warn(
1066
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1067
+ warnings.warn(
1068
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1069
+ warnings.warn(
1070
+ /mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/models/gpt/gpt_layer_specs.py:94: UserWarning: The fp8 argument in "get_gpt_layer_with_transformer_engine_spec" has been deprecated and will be removed soon. Please update your code accordingly.
1071
+ warnings.warn(
1072
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1073
+ warnings.warn(
1074
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1075
+ warnings.warn(
1076
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1077
+ warnings.warn(
1078
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1079
+ warnings.warn(
1080
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1081
+ warnings.warn(
1082
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1083
+ warnings.warn(
1084
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1085
+ warnings.warn(
1086
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1087
+ warnings.warn(
1088
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1089
+ warnings.warn(
1090
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1091
+ warnings.warn(
1092
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1093
+ warnings.warn(
1094
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1095
+ warnings.warn(
1096
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1097
+ warnings.warn(
1098
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1099
+ warnings.warn(
1100
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1101
+ warnings.warn(
1102
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1103
+ warnings.warn(
1104
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1105
+ warnings.warn(
1106
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1107
+ warnings.warn(
1108
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1109
+ warnings.warn(
1110
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1111
+ warnings.warn(
1112
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1113
+ warnings.warn(
1114
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1115
+ warnings.warn(
1116
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1117
+ warnings.warn(
1118
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1119
+ warnings.warn(
1120
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1121
+ warnings.warn(
1122
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1123
+ warnings.warn(
1124
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1125
+ warnings.warn(
1126
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1127
+ warnings.warn(
1128
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1129
+ warnings.warn(
1130
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1131
+ warnings.warn(
1132
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1133
+ warnings.warn(
1134
+ /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/transformer_engine/pytorch/cpu_offload.py:595: DeprecationWarning: Offloading weights is deprecated. Using offload_weights=True does not have any effect.
1135
+ warnings.warn(
attnserver.run_attnserver.slurm.sh.343197.out.log CHANGED
The diff for this file is too large to render. See raw diff
 
attnserver.run_attnserver.slurm.sh.343201.err.log CHANGED
@@ -7399,3 +7399,291 @@ W0621 20:38:05.515000 2488203 site-packages/torch/distributed/run.py:766] ******
7399
  [rank0]: torch._C.PyTorchFileWriter(
7400
  [rank0]: RuntimeError: Parent directory gpt-checkpoint/iter_0000010 does not exist.
7401
  [rank0]:[W621 20:55:47.083380764 ProcessGroupNCCL.cpp:1476] Warning: WARNING: destroy_process_group() was not called before program exit, which can leak resources. For more info, please see https://pytorch.org/docs/stable/distributed.html#shutdown (function operator())
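The failure captured in the context above is a plain filesystem error: the checkpoint writer expects gpt-checkpoint/iter_0000010 to exist before it opens a file inside it. A minimal sketch of the usual workaround, pre-creating the directory before saving; the file name and the torch.save call are illustrative only, since the real run writes through Megatron's checkpointing code:

    import os
    import torch

    ckpt_dir = "gpt-checkpoint/iter_0000010"   # path taken from the error above
    os.makedirs(ckpt_dir, exist_ok=True)       # create the missing parent directory

    # illustrative save only, standing in for the actual checkpoint write
    torch.save({"example": 0}, os.path.join(ckpt_dir, "model_optim_rng.pt"))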
7402
+ W0621 20:56:12.736000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886821 closing signal SIGTERM
7403
+ W0621 20:56:12.740000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886822 closing signal SIGTERM
7404
+ W0621 20:56:12.749000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886823 closing signal SIGTERM
7405
+ W0621 20:56:12.751000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886824 closing signal SIGTERM
7406
+ W0621 20:56:12.760000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886825 closing signal SIGTERM
7407
+ W0621 20:56:12.764000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886826 closing signal SIGTERM
7408
+ W0621 20:56:12.768000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 3886827 closing signal SIGTERM
7409
+ E0621 20:56:14.808000 3886749 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 0 (pid: 3886820) of binary: /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
7410
+ Traceback (most recent call last):
7411
+ File "<frozen runpy>", line 198, in _run_module_as_main
7412
+ File "<frozen runpy>", line 88, in _run_code
7413
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
7414
+ main()
7415
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
7416
+ return arg(*args, **kwargs)
7417
+ ^^^^^^^^^^^^^^^^^^^^
7418
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
7419
+ launch(args)
7420
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
7421
+ run(args)
7422
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
7423
+ elastic_launch(
7424
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
7425
+ return launch_agent(self._config, self._entrypoint, list(args))
7426
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
7427
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 270, in launch_agent
7428
+ raise ChildFailedError(
7429
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
7430
+ ============================================================
7431
+ ./pretrain_gpt_profile.py FAILED
7432
+ ------------------------------------------------------------
7433
+ Failures:
7434
+ <NO_OTHER_FAILURES>
7435
+ ------------------------------------------------------------
7436
+ Root Cause (first observed failure):
7437
+ [0]:
7438
+ time : 2025-06-21_20:56:12
7439
+ host : fs-mbz-gpu-728
7440
+ rank : 0 (local_rank: 0)
7441
+ exitcode : 1 (pid: 3886820)
7442
+ error_file: <N/A>
7443
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
7444
+ ============================================================
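The ChildFailedError summary above reports error_file: <N/A> because the failing entrypoint was not wrapped for elastic error propagation. A hedged sketch of the approach the linked errors page describes, decorating the script's main with the record helper so the worker traceback is written to an error file (the main body here is a placeholder, not the actual ./pretrain_gpt_profile.py):

    from torch.distributed.elastic.multiprocessing.errors import record

    @record
    def main():
        # placeholder for the real training entrypoint
        ...

    if __name__ == "__main__":
        main()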
7445
+ + set +x
7446
+ [rank8]:[W621 20:56:15.850896161 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=75, addr=[fs-mbz-gpu-865]:35796, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7447
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7448
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14b1c8b785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7449
+ frame #1: <unknown function> + 0x5ba8afe (0x14b1b1a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7450
+ frame #2: <unknown function> + 0x5baae40 (0x14b1b1a5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7451
+ frame #3: <unknown function> + 0x5bab74a (0x14b1b1a5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7452
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14b1b1a571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7453
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14b16ec509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7454
+ frame #6: <unknown function> + 0xd3b6d (0x14b1c86f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7455
+ frame #7: <unknown function> + 0x94ac3 (0x14b1c9c1aac3 in /lib/x86_64-linux-gnu/libc.so.6)
7456
+ frame #8: <unknown function> + 0x126850 (0x14b1c9cac850 in /lib/x86_64-linux-gnu/libc.so.6)
7457
+
7458
+ [rank8]:[W621 20:56:15.854945200 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 8] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7459
+ [rank13]:[W621 20:56:15.890121083 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35830, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7460
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7461
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1526b3d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7462
+ frame #1: <unknown function> + 0x5ba8afe (0x15269d05aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7463
+ frame #2: <unknown function> + 0x5baae40 (0x15269d05ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7464
+ frame #3: <unknown function> + 0x5bab74a (0x15269d05d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7465
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x15269d0571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7466
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x15265a2509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7467
+ frame #6: <unknown function> + 0xd3b6d (0x15264a219b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7468
+ frame #7: <unknown function> + 0x94ac3 (0x1526b517bac3 in /lib/x86_64-linux-gnu/libc.so.6)
7469
+ frame #8: <unknown function> + 0x126850 (0x1526b520d850 in /lib/x86_64-linux-gnu/libc.so.6)
7470
+
7471
+ [rank13]:[W621 20:56:15.893744899 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 13] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7472
+ [rank9]:[W621 20:56:15.890050954 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35828, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7473
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7474
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14d258d785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7475
+ frame #1: <unknown function> + 0x5ba8afe (0x14d241c5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7476
+ frame #2: <unknown function> + 0x5baae40 (0x14d241c5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7477
+ frame #3: <unknown function> + 0x5bab74a (0x14d241c5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7478
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14d241c571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7479
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14d1fee509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7480
+ frame #6: <unknown function> + 0xd3b6d (0x14d2588f1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7481
+ frame #7: <unknown function> + 0x94ac3 (0x14d259da0ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7482
+ frame #8: <unknown function> + 0x126850 (0x14d259e32850 in /lib/x86_64-linux-gnu/libc.so.6)
7483
+
7484
+ [rank9]:[W621 20:56:15.894407524 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 9] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7485
+ [rank10]:[W621 20:56:15.890112914 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35810, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7486
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7487
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x14acf1f785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7488
+ frame #1: <unknown function> + 0x5ba8afe (0x14acdae5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7489
+ frame #2: <unknown function> + 0x5baae40 (0x14acdae5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7490
+ frame #3: <unknown function> + 0x5bab74a (0x14acdae5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7491
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x14acdae571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7492
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x14ac980509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7493
+ frame #6: <unknown function> + 0xd3b6d (0x14acf1af1b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7494
+ frame #7: <unknown function> + 0x94ac3 (0x14acf3078ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7495
+ frame #8: <unknown function> + 0x126850 (0x14acf310a850 in /lib/x86_64-linux-gnu/libc.so.6)
7496
+
7497
+ [rank10]:[W621 20:56:15.894711931 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 10] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7498
+ [rank15]:[W621 20:56:15.892921773 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35804, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7499
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7500
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x146bbfd785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7501
+ frame #1: <unknown function> + 0x5ba8afe (0x146ba905aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7502
+ frame #2: <unknown function> + 0x5baae40 (0x146ba905ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7503
+ frame #3: <unknown function> + 0x5bab74a (0x146ba905d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7504
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x146ba90571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7505
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x146b662509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7506
+ frame #6: <unknown function> + 0xd3b6d (0x146b56219b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7507
+ frame #7: <unknown function> + 0x94ac3 (0x146bc1140ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7508
+ frame #8: <unknown function> + 0x126850 (0x146bc11d2850 in /lib/x86_64-linux-gnu/libc.so.6)
7509
+
7510
+ [rank15]:[W621 20:56:15.896809244 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 15] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7511
+ [rank14]:[W621 20:56:15.894397980 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35814, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7512
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7513
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x1482bb5785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7514
+ frame #1: <unknown function> + 0x5ba8afe (0x1482a485aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7515
+ frame #2: <unknown function> + 0x5baae40 (0x1482a485ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7516
+ frame #3: <unknown function> + 0x5bab74a (0x1482a485d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7517
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x1482a48571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7518
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x148261a509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7519
+ frame #6: <unknown function> + 0xd3b6d (0x148251a19b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7520
+ frame #7: <unknown function> + 0x94ac3 (0x1482bc965ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7521
+ frame #8: <unknown function> + 0x126850 (0x1482bc9f7850 in /lib/x86_64-linux-gnu/libc.so.6)
7522
+
7523
+ [rank14]:[W621 20:56:15.898310388 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 14] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7524
+ [rank12]:[W621 20:56:15.895533320 TCPStore.cpp:125] [c10d] recvValue failed on SocketImpl(fd=95, addr=[fs-mbz-gpu-865]:35846, remote=[fs-mbz-gpu-728]:41397): failed to recv, got 0 bytes
7525
+ Exception raised from recvBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:678 (most recent call first):
7526
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x146ba29785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7527
+ frame #1: <unknown function> + 0x5ba8afe (0x146b8bc5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7528
+ frame #2: <unknown function> + 0x5baae40 (0x146b8bc5ce40 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7529
+ frame #3: <unknown function> + 0x5bab74a (0x146b8bc5d74a in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7530
+ frame #4: c10d::TCPStore::check(std::vector<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, std::allocator<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > > > const&) + 0x2a9 (0x146b8bc571a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7531
+ frame #5: c10d::ProcessGroupNCCL::heartbeatMonitor() + 0x379 (0x146b48e509a9 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cuda.so)
7532
+ frame #6: <unknown function> + 0xd3b6d (0x146b38e19b6d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/../lib/libstdc++.so.6)
7533
+ frame #7: <unknown function> + 0x94ac3 (0x146ba3d1aac3 in /lib/x86_64-linux-gnu/libc.so.6)
7534
+ frame #8: <unknown function> + 0x126850 (0x146ba3dac850 in /lib/x86_64-linux-gnu/libc.so.6)
7535
+
7536
+ [rank12]:[W621 20:56:15.899106259 ProcessGroupNCCL.cpp:1659] [PG ID 0 PG GUID 0(default_pg) Rank 12] Failed to check the "should dump" flag on TCPStore, (maybe TCPStore server has shut down too early), with error: failed to recv, got 0 bytes
7537
+ W0621 20:56:15.611000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488272 closing signal SIGTERM
7538
+ W0621 20:56:15.614000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488273 closing signal SIGTERM
7539
+ W0621 20:56:15.616000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488274 closing signal SIGTERM
7540
+ W0621 20:56:15.619000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488275 closing signal SIGTERM
7541
+ W0621 20:56:15.622000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488276 closing signal SIGTERM
7542
+ W0621 20:56:15.626000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488277 closing signal SIGTERM
7543
+ W0621 20:56:15.630000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488278 closing signal SIGTERM
7544
+ W0621 20:56:15.632000 2488203 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 2488279 closing signal SIGTERM
7545
+ [W621 20:56:16.919203229 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
7546
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
7547
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7548
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7549
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7550
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7551
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7552
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7553
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7554
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
7555
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
7556
+ <omitting python frames>
7557
+ frame #17: <unknown function> + 0x94ac3 (0x148acbc35ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7558
+ frame #18: <unknown function> + 0x126850 (0x148acbcc7850 in /lib/x86_64-linux-gnu/libc.so.6)
7559
+
7560
+ W0621 20:56:16.600000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-865_2488203_0' has failed to send a keep-alive heartbeat to the rendezvous '343201' due to an error of type RendezvousConnectionError.
7561
+ [W621 20:56:21.929682945 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
7562
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
7563
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
7564
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7565
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7566
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7567
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7568
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7569
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
7570
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
7571
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
7572
+ <omitting python frames>
7573
+ frame #17: <unknown function> + 0x94ac3 (0x148acbc35ac3 in /lib/x86_64-linux-gnu/libc.so.6)
7574
+ frame #18: <unknown function> + 0x126850 (0x148acbcc7850 in /lib/x86_64-linux-gnu/libc.so.6)
7575
+
7576
+ W0621 20:56:21.607000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-865_2488203_0' has failed to send a keep-alive heartbeat to the rendezvous '343201' due to an error of type RendezvousConnectionError.
7577
+ [W621 20:56:26.937155280 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
7578
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
7579
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ <omitting python frames>
+ frame #17: <unknown function> + 0x94ac3 (0x148acbc35ac3 in /lib/x86_64-linux-gnu/libc.so.6)
+ frame #18: <unknown function> + 0x126850 (0x148acbcc7850 in /lib/x86_64-linux-gnu/libc.so.6)
+
+ W0621 20:56:26.614000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-865_2488203_0' has failed to send a keep-alive heartbeat to the rendezvous '343201' due to an error of type RendezvousConnectionError.
+ [W621 20:56:31.943237836 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ <omitting python frames>
+ frame #17: <unknown function> + 0x94ac3 (0x148acbc35ac3 in /lib/x86_64-linux-gnu/libc.so.6)
+ frame #18: <unknown function> + 0x126850 (0x148acbcc7850 in /lib/x86_64-linux-gnu/libc.so.6)
+
+ W0621 20:56:31.623000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1341] The node 'fs-mbz-gpu-865_2488203_0' has failed to send a keep-alive heartbeat to the rendezvous '343201' due to an error of type RendezvousConnectionError.
+ [W621 20:56:36.340415404 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ <omitting python frames>
+ frame #26: <unknown function> + 0x29d90 (0x148acbbcad90 in /lib/x86_64-linux-gnu/libc.so.6)
+ frame #27: __libc_start_main + 0x80 (0x148acbbcae40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+ W0621 20:56:36.026000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-865_2488203_0' has failed to shutdown the rendezvous '343201' due to an error of type RendezvousConnectionError.
+ [W621 20:56:36.356868549 TCPStore.cpp:106] [c10d] sendBytes failed on SocketImpl(fd=3, addr=[fs-mbz-gpu-865]:33784, remote=[fs-mbz-gpu-728]:29500): Broken pipe
+ Exception raised from sendBytes at /pytorch/torch/csrc/distributed/c10d/Utils.hpp:653 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >) + 0x98 (0x148acab785e8 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x5ba8afe (0x148ab3a5aafe in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #2: <unknown function> + 0x5baa358 (0x148ab3a5c358 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #3: <unknown function> + 0x5babb3e (0x148ab3a5db3e in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #4: c10d::TCPStore::doWait(c10::ArrayRef<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x1a6 (0x148ab3a57ac6 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #5: c10d::TCPStore::doGet(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0x33 (0x148ab3a57ea3 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #6: c10d::TCPStore::get(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&) + 0xab (0x148ab3a58f8b in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_cpu.so)
+ frame #7: <unknown function> + 0xc0f526 (0x148ac2d8b526 in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ frame #8: <unknown function> + 0x37f17d (0x148ac24fb17d in /mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/lib/libtorch_python.so)
+ <omitting python frames>
+ frame #26: <unknown function> + 0x29d90 (0x148acbbcad90 in /lib/x86_64-linux-gnu/libc.so.6)
+ frame #27: __libc_start_main + 0x80 (0x148acbbcae40 in /lib/x86_64-linux-gnu/libc.so.6)
+
+ W0621 20:56:36.038000 2488203 site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1292] The node 'fs-mbz-gpu-865_2488203_0' has failed to shutdown the rendezvous '343201' due to an error of type RendezvousConnectionError.
+ Traceback (most recent call last):
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 117, in _call_store
+ return getattr(self._store, store_op)(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ torch.distributed.DistNetworkError: failed to recv, got 0 bytes
+
+ The above exception was the direct cause of the following exception:
+
+ Traceback (most recent call last):
+ File "<frozen runpy>", line 198, in _run_module_as_main
+ File "<frozen runpy>", line 88, in _run_code
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 207, in <module>
+ main()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/typing_extensions.py", line 3253, in wrapper
+ return arg(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 203, in main
+ launch(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launch.py", line 188, in launch
+ run(args)
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/run.py", line 883, in run
+ elastic_launch(
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 139, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/launcher/api.py", line 261, in launch_agent
+ result = agent.run()
+ ^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/metrics/api.py", line 138, in wrapper
+ result = f(*args, **kwargs)
+ ^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 711, in run
+ result = self._invoke_run(role)
+ ^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/agent/server/api.py", line 906, in _invoke_run
+ num_nodes_waiting = rdzv_handler.num_nodes_waiting()
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 1263, in num_nodes_waiting
+ self._state_holder.sync()
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py", line 437, in sync
+ get_response = self._backend.get_state()
+ ^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 75, in get_state
+ base64_state: bytes = self._call_store("get", self._key)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ File "/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/lib/python3.12/site-packages/torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py", line 119, in _call_store
+ raise RendezvousConnectionError(
+ torch.distributed.elastic.rendezvous.api.RendezvousConnectionError: The connection to the C10d store has failed. See inner exception for details.
+ + set +x