axrzce committed on
Commit
74ffeb9
·
verified ·
1 Parent(s): ebf124a

Deploy from GitHub main

Browse files
src/ui/compi_phase3_final_dashboard.py CHANGED
@@ -16,6 +16,7 @@ Features:
16
  - True fusion engine: real processing for all inputs, intelligent generation mode selection
17
  """
18
 
 
19
  import os
20
  import io
21
  import csv
@@ -1290,6 +1291,14 @@ with tab_inputs:
1290
  paths.append(path)
1291
  images.append(img)
1292
 
 
 
 
 
 
 
 
 
1293
  # Log generation
1294
  rows.append({
1295
  "filepath": path,
@@ -1334,6 +1343,17 @@ with tab_inputs:
1334
  log_rows(rows, RUNLOG)
1335
  st.success(f"Saved {len(rows)} image(s). Run log updated: {RUNLOG}")
1336
 
 
 
 
 
 
 
 
 
 
 
 
1337
  # ==================== GALLERY & ANNOTATE TAB (Phase 3.D) ====================
1338
 
1339
  with tab_gallery:
 
16
  - True fusion engine: real processing for all inputs, intelligent generation mode selection
17
  """
18
 
19
+ import gc
20
  import os
21
  import io
22
  import csv
 
1291
  paths.append(path)
1292
  images.append(img)
1293
 
1294
+ # EXPLICIT VRAM CLEANUP after successful generation
1295
+ if DEVICE == "cuda":
1296
+ torch.cuda.empty_cache()
1297
+ torch.cuda.synchronize()
1298
+ # Force garbage collection to help with Python object cleanup
1299
+ import gc
1300
+ gc.collect()
1301
+
1302
  # Log generation
1303
  rows.append({
1304
  "filepath": path,
 
1343
  log_rows(rows, RUNLOG)
1344
  st.success(f"Saved {len(rows)} image(s). Run log updated: {RUNLOG}")
1345
 
1346
+ # FINAL VRAM CLEANUP after entire batch
1347
+ if DEVICE == "cuda":
1348
+ torch.cuda.empty_cache()
1349
+ torch.cuda.synchronize()
1350
+ import gc
1351
+ gc.collect()
1352
+ # Show updated VRAM usage
1353
+ used_after = vram_used_gb()
1354
+ if used_after:
1355
+ st.info(f"🧹 VRAM cleaned • Current usage: {used_after:.2f} GB")
1356
+
1357
  # ==================== GALLERY & ANNOTATE TAB (Phase 3.D) ====================
1358
 
1359
  with tab_gallery: