Freak-ppa committed on
Commit
a829bff
·
verified ·
1 Parent(s): ea000ae

Upload 6 files

ComfyUI/custom_nodes/ComfyUI_yanc/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ from .yanc import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
2
+
3
+ __all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
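For reference, ComfyUI picks these nodes up by importing the package and reading the two dictionaries that this __init__.py re-exports from yanc.py. Below is a minimal sketch of that consumer side only; it is illustrative, assumes the package directory is importable by name (ComfyUI's own loader resolves custom_nodes entries by file path), and is not part of the commit.

    # Illustrative sketch, not part of this commit: how a ComfyUI-style loader
    # uses NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS.
    import importlib

    pkg = importlib.import_module("ComfyUI_yanc")  # hypothetical import path
    for node_type, node_class in pkg.NODE_CLASS_MAPPINGS.items():
        display = pkg.NODE_DISPLAY_NAME_MAPPINGS.get(node_type, node_type)
        print(f"{node_type!r} -> {node_class.__name__}, shown in the menu as {display!r}")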
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename.json ADDED
@@ -0,0 +1,194 @@
1
+ {
2
+ "last_node_id": 5,
3
+ "last_link_id": 5,
4
+ "nodes": [
5
+ {
6
+ "id": 3,
7
+ "type": "PreviewImage",
8
+ "pos": [
9
+ 442,
10
+ 250
11
+ ],
12
+ "size": [
13
+ 300,
14
+ 246
15
+ ],
16
+ "flags": {},
17
+ "order": 2,
18
+ "mode": 0,
19
+ "inputs": [
20
+ {
21
+ "name": "images",
22
+ "type": "IMAGE",
23
+ "link": 2
24
+ }
25
+ ],
26
+ "properties": {
27
+ "Node name for S&R": "PreviewImage"
28
+ }
29
+ },
30
+ {
31
+ "id": 4,
32
+ "type": "> Save Image",
33
+ "pos": [
34
+ 820,
35
+ 100
36
+ ],
37
+ "size": [
38
+ 315,
39
+ 338
40
+ ],
41
+ "flags": {},
42
+ "order": 3,
43
+ "mode": 0,
44
+ "inputs": [
45
+ {
46
+ "name": "images",
47
+ "type": "IMAGE",
48
+ "link": 3
49
+ },
50
+ {
51
+ "name": "filename_opt",
52
+ "type": "STRING",
53
+ "link": 4,
54
+ "widget": {
55
+ "name": "filename_opt"
56
+ }
57
+ }
58
+ ],
59
+ "properties": {
60
+ "Node name for S&R": "> Save Image"
61
+ },
62
+ "widgets_values": [
63
+ "ComfyUI",
64
+ "myoutputs",
65
+ true,
66
+ ""
67
+ ]
68
+ },
69
+ {
70
+ "id": 1,
71
+ "type": "> Load Image From Folder",
72
+ "pos": [
73
+ 440,
74
+ 100
75
+ ],
76
+ "size": {
77
+ "0": 315,
78
+ "1": 102
79
+ },
80
+ "flags": {},
81
+ "order": 1,
82
+ "mode": 0,
83
+ "inputs": [
84
+ {
85
+ "name": "index",
86
+ "type": "INT",
87
+ "link": 5,
88
+ "widget": {
89
+ "name": "index"
90
+ }
91
+ }
92
+ ],
93
+ "outputs": [
94
+ {
95
+ "name": "image",
96
+ "type": "IMAGE",
97
+ "links": [
98
+ 2,
99
+ 3
100
+ ],
101
+ "shape": 3,
102
+ "slot_index": 0
103
+ },
104
+ {
105
+ "name": "file_name",
106
+ "type": "STRING",
107
+ "links": [
108
+ 4
109
+ ],
110
+ "shape": 3,
111
+ "slot_index": 1
112
+ }
113
+ ],
114
+ "properties": {
115
+ "Node name for S&R": "> Load Image From Folder"
116
+ },
117
+ "widgets_values": [
118
+ "myinputs",
119
+ -1
120
+ ]
121
+ },
122
+ {
123
+ "id": 5,
124
+ "type": "> Int",
125
+ "pos": [
126
+ 53,
127
+ 103
128
+ ],
129
+ "size": {
130
+ "0": 315,
131
+ "1": 82
132
+ },
133
+ "flags": {},
134
+ "order": 0,
135
+ "mode": 0,
136
+ "outputs": [
137
+ {
138
+ "name": "int",
139
+ "type": "INT",
140
+ "links": [
141
+ 5
142
+ ],
143
+ "shape": 3,
144
+ "slot_index": 0
145
+ }
146
+ ],
147
+ "properties": {
148
+ "Node name for S&R": "> Int"
149
+ },
150
+ "widgets_values": [
151
+ 0,
152
+ "increment"
153
+ ]
154
+ }
155
+ ],
156
+ "links": [
157
+ [
158
+ 2,
159
+ 1,
160
+ 0,
161
+ 3,
162
+ 0,
163
+ "IMAGE"
164
+ ],
165
+ [
166
+ 3,
167
+ 1,
168
+ 0,
169
+ 4,
170
+ 0,
171
+ "IMAGE"
172
+ ],
173
+ [
174
+ 4,
175
+ 1,
176
+ 1,
177
+ 4,
178
+ 1,
179
+ "STRING"
180
+ ],
181
+ [
182
+ 5,
183
+ 5,
184
+ 0,
185
+ 1,
186
+ 0,
187
+ "INT"
188
+ ]
189
+ ],
190
+ "groups": [],
191
+ "config": {},
192
+ "extra": {},
193
+ "version": 0.4
194
+ }
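This workflow, like the other examples in this commit, uses ComfyUI's LiteGraph serialization: a "nodes" array plus a "links" array whose entries follow [link_id, from_node, from_slot, to_node, to_slot, type] (link 2 above carries IMAGE from node 1, slot 0, into node 3, slot 0). The sketch below is only a quick way to inspect that structure; the path is the file added in this commit and the script is assumed to run from the ComfyUI root.

    # Sketch: print the connections of the example workflow above.
    import json

    with open("custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename.json") as f:
        wf = json.load(f)

    node_types = {n["id"]: n["type"] for n in wf["nodes"]}
    for link_id, src, src_slot, dst, dst_slot, dtype in wf["links"]:
        print(f"link {link_id}: {node_types[src]}[{src_slot}] --{dtype}--> {node_types[dst]}[{dst_slot}]")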
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_and_counter.json ADDED
@@ -0,0 +1,485 @@
1
+ {
2
+ "last_node_id": 10,
3
+ "last_link_id": 13,
4
+ "nodes": [
5
+ {
6
+ "id": 3,
7
+ "type": "PreviewImage",
8
+ "pos": [
9
+ 442,
10
+ 250
11
+ ],
12
+ "size": [
13
+ 300,
14
+ 246
15
+ ],
16
+ "flags": {},
17
+ "order": 4,
18
+ "mode": 0,
19
+ "inputs": [
20
+ {
21
+ "name": "images",
22
+ "type": "IMAGE",
23
+ "link": 2
24
+ }
25
+ ],
26
+ "properties": {
27
+ "Node name for S&R": "PreviewImage"
28
+ }
29
+ },
30
+ {
31
+ "id": 4,
32
+ "type": "> Save Image",
33
+ "pos": [
34
+ 1580,
35
+ 100
36
+ ],
37
+ "size": [
38
+ 315,
39
+ 338
40
+ ],
41
+ "flags": {},
42
+ "order": 8,
43
+ "mode": 0,
44
+ "inputs": [
45
+ {
46
+ "name": "images",
47
+ "type": "IMAGE",
48
+ "link": 3
49
+ },
50
+ {
51
+ "name": "filename_opt",
52
+ "type": "STRING",
53
+ "link": 7,
54
+ "widget": {
55
+ "name": "filename_opt"
56
+ }
57
+ }
58
+ ],
59
+ "properties": {
60
+ "Node name for S&R": "> Save Image"
61
+ },
62
+ "widgets_values": [
63
+ "ComfyUI",
64
+ "myoutputs",
65
+ true,
66
+ ""
67
+ ]
68
+ },
69
+ {
70
+ "id": 1,
71
+ "type": "> Load Image From Folder",
72
+ "pos": [
73
+ 440,
74
+ 100
75
+ ],
76
+ "size": {
77
+ "0": 315,
78
+ "1": 102
79
+ },
80
+ "flags": {},
81
+ "order": 2,
82
+ "mode": 0,
83
+ "inputs": [
84
+ {
85
+ "name": "index",
86
+ "type": "INT",
87
+ "link": 5,
88
+ "widget": {
89
+ "name": "index"
90
+ }
91
+ }
92
+ ],
93
+ "outputs": [
94
+ {
95
+ "name": "image",
96
+ "type": "IMAGE",
97
+ "links": [
98
+ 2,
99
+ 3
100
+ ],
101
+ "shape": 3,
102
+ "slot_index": 0
103
+ },
104
+ {
105
+ "name": "file_name",
106
+ "type": "STRING",
107
+ "links": [
108
+ 6
109
+ ],
110
+ "shape": 3,
111
+ "slot_index": 1
112
+ }
113
+ ],
114
+ "properties": {
115
+ "Node name for S&R": "> Load Image From Folder"
116
+ },
117
+ "widgets_values": [
118
+ "myinputs",
119
+ -1
120
+ ]
121
+ },
122
+ {
123
+ "id": 6,
124
+ "type": "> Text Combine",
125
+ "pos": [
126
+ 1220,
127
+ 160
128
+ ],
129
+ "size": [
130
+ 315,
131
+ 130
132
+ ],
133
+ "flags": {},
134
+ "order": 7,
135
+ "mode": 0,
136
+ "inputs": [
137
+ {
138
+ "name": "text",
139
+ "type": "STRING",
140
+ "link": 6,
141
+ "widget": {
142
+ "name": "text"
143
+ }
144
+ },
145
+ {
146
+ "name": "text_append",
147
+ "type": "STRING",
148
+ "link": 8,
149
+ "widget": {
150
+ "name": "text_append"
151
+ }
152
+ }
153
+ ],
154
+ "outputs": [
155
+ {
156
+ "name": "text",
157
+ "type": "STRING",
158
+ "links": [
159
+ 7
160
+ ],
161
+ "shape": 3,
162
+ "slot_index": 0
163
+ }
164
+ ],
165
+ "properties": {
166
+ "Node name for S&R": "> Text Combine"
167
+ },
168
+ "widgets_values": [
169
+ "",
170
+ "",
171
+ "_",
172
+ false
173
+ ]
174
+ },
175
+ {
176
+ "id": 7,
177
+ "type": "> Int to Text",
178
+ "pos": [
179
+ 860,
180
+ 240
181
+ ],
182
+ "size": [
183
+ 315,
184
+ 106
185
+ ],
186
+ "flags": {},
187
+ "order": 6,
188
+ "mode": 0,
189
+ "inputs": [
190
+ {
191
+ "name": "int",
192
+ "type": "INT",
193
+ "link": 13,
194
+ "widget": {
195
+ "name": "int"
196
+ }
197
+ }
198
+ ],
199
+ "outputs": [
200
+ {
201
+ "name": "text",
202
+ "type": "STRING",
203
+ "links": [
204
+ 8
205
+ ],
206
+ "shape": 3,
207
+ "slot_index": 0
208
+ }
209
+ ],
210
+ "properties": {
211
+ "Node name for S&R": "> Int to Text"
212
+ },
213
+ "widgets_values": [
214
+ 0,
215
+ true,
216
+ 5
217
+ ]
218
+ },
219
+ {
220
+ "id": 5,
221
+ "type": "> Int",
222
+ "pos": [
223
+ 53,
224
+ 103
225
+ ],
226
+ "size": {
227
+ "0": 315,
228
+ "1": 82
229
+ },
230
+ "flags": {},
231
+ "order": 0,
232
+ "mode": 0,
233
+ "outputs": [
234
+ {
235
+ "name": "int",
236
+ "type": "INT",
237
+ "links": [
238
+ 5,
239
+ 10
240
+ ],
241
+ "shape": 3,
242
+ "slot_index": 0
243
+ }
244
+ ],
245
+ "properties": {
246
+ "Node name for S&R": "> Int"
247
+ },
248
+ "widgets_values": [
249
+ 124,
250
+ "increment"
251
+ ]
252
+ },
253
+ {
254
+ "id": 10,
255
+ "type": "> Float to Int",
256
+ "pos": [
257
+ 780,
258
+ 580
259
+ ],
260
+ "size": [
261
+ 315,
262
+ 82
263
+ ],
264
+ "flags": {},
265
+ "order": 5,
266
+ "mode": 0,
267
+ "inputs": [
268
+ {
269
+ "name": "float",
270
+ "type": "FLOAT",
271
+ "link": 12,
272
+ "widget": {
273
+ "name": "float"
274
+ }
275
+ }
276
+ ],
277
+ "outputs": [
278
+ {
279
+ "name": "int",
280
+ "type": "INT",
281
+ "links": [
282
+ 13
283
+ ],
284
+ "shape": 3,
285
+ "slot_index": 0
286
+ }
287
+ ],
288
+ "properties": {
289
+ "Node name for S&R": "> Float to Int"
290
+ },
291
+ "widgets_values": [
292
+ 0,
293
+ "floor"
294
+ ]
295
+ },
296
+ {
297
+ "id": 8,
298
+ "type": "SimpleMath+",
299
+ "pos": [
300
+ 420,
301
+ 560
302
+ ],
303
+ "size": {
304
+ "0": 315,
305
+ "1": 78
306
+ },
307
+ "flags": {},
308
+ "order": 3,
309
+ "mode": 0,
310
+ "inputs": [
311
+ {
312
+ "name": "a",
313
+ "type": "INT,FLOAT",
314
+ "link": 10
315
+ },
316
+ {
317
+ "name": "b",
318
+ "type": "INT,FLOAT",
319
+ "link": 11
320
+ }
321
+ ],
322
+ "outputs": [
323
+ {
324
+ "name": "INT",
325
+ "type": "INT",
326
+ "links": null,
327
+ "shape": 3,
328
+ "slot_index": 0
329
+ },
330
+ {
331
+ "name": "FLOAT",
332
+ "type": "FLOAT",
333
+ "links": [
334
+ 12
335
+ ],
336
+ "shape": 3,
337
+ "slot_index": 1
338
+ }
339
+ ],
340
+ "properties": {
341
+ "Node name for S&R": "SimpleMath+"
342
+ },
343
+ "widgets_values": [
344
+ "a/b"
345
+ ]
346
+ },
347
+ {
348
+ "id": 9,
349
+ "type": "SimpleMath+",
350
+ "pos": [
351
+ 60,
352
+ 560
353
+ ],
354
+ "size": {
355
+ "0": 315,
356
+ "1": 78
357
+ },
358
+ "flags": {},
359
+ "order": 1,
360
+ "mode": 0,
361
+ "inputs": [
362
+ {
363
+ "name": "a",
364
+ "type": "INT,FLOAT",
365
+ "link": null
366
+ },
367
+ {
368
+ "name": "b",
369
+ "type": "INT,FLOAT",
370
+ "link": null
371
+ }
372
+ ],
373
+ "outputs": [
374
+ {
375
+ "name": "INT",
376
+ "type": "INT",
377
+ "links": [
378
+ 11
379
+ ],
380
+ "shape": 3,
381
+ "slot_index": 0
382
+ },
383
+ {
384
+ "name": "FLOAT",
385
+ "type": "FLOAT",
386
+ "links": null,
387
+ "shape": 3
388
+ }
389
+ ],
390
+ "title": "Amount of Images in Input Folder",
391
+ "properties": {
392
+ "Node name for S&R": "SimpleMath+"
393
+ },
394
+ "widgets_values": [
395
+ "62"
396
+ ]
397
+ }
398
+ ],
399
+ "links": [
400
+ [
401
+ 2,
402
+ 1,
403
+ 0,
404
+ 3,
405
+ 0,
406
+ "IMAGE"
407
+ ],
408
+ [
409
+ 3,
410
+ 1,
411
+ 0,
412
+ 4,
413
+ 0,
414
+ "IMAGE"
415
+ ],
416
+ [
417
+ 5,
418
+ 5,
419
+ 0,
420
+ 1,
421
+ 0,
422
+ "INT"
423
+ ],
424
+ [
425
+ 6,
426
+ 1,
427
+ 1,
428
+ 6,
429
+ 0,
430
+ "STRING"
431
+ ],
432
+ [
433
+ 7,
434
+ 6,
435
+ 0,
436
+ 4,
437
+ 1,
438
+ "STRING"
439
+ ],
440
+ [
441
+ 8,
442
+ 7,
443
+ 0,
444
+ 6,
445
+ 1,
446
+ "STRING"
447
+ ],
448
+ [
449
+ 10,
450
+ 5,
451
+ 0,
452
+ 8,
453
+ 0,
454
+ "INT,FLOAT"
455
+ ],
456
+ [
457
+ 11,
458
+ 9,
459
+ 0,
460
+ 8,
461
+ 1,
462
+ "INT,FLOAT"
463
+ ],
464
+ [
465
+ 12,
466
+ 8,
467
+ 1,
468
+ 10,
469
+ 0,
470
+ "FLOAT"
471
+ ],
472
+ [
473
+ 13,
474
+ 10,
475
+ 0,
476
+ 7,
477
+ 0,
478
+ "INT"
479
+ ]
480
+ ],
481
+ "groups": [],
482
+ "config": {},
483
+ "extra": {},
484
+ "version": 0.4
485
+ }
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_save_with_filename_in_divided_folders.json ADDED
@@ -0,0 +1,536 @@
1
+ {
2
+ "last_node_id": 11,
3
+ "last_link_id": 17,
4
+ "nodes": [
5
+ {
6
+ "id": 3,
7
+ "type": "PreviewImage",
8
+ "pos": [
9
+ 442,
10
+ 250
11
+ ],
12
+ "size": [
13
+ 300,
14
+ 246
15
+ ],
16
+ "flags": {},
17
+ "order": 5,
18
+ "mode": 0,
19
+ "inputs": [
20
+ {
21
+ "name": "images",
22
+ "type": "IMAGE",
23
+ "link": 2
24
+ }
25
+ ],
26
+ "properties": {
27
+ "Node name for S&R": "PreviewImage"
28
+ }
29
+ },
30
+ {
31
+ "id": 10,
32
+ "type": "> Float to Int",
33
+ "pos": [
34
+ 780,
35
+ 580
36
+ ],
37
+ "size": [
38
+ 315,
39
+ 82
40
+ ],
41
+ "flags": {},
42
+ "order": 6,
43
+ "mode": 0,
44
+ "inputs": [
45
+ {
46
+ "name": "float",
47
+ "type": "FLOAT",
48
+ "link": 12,
49
+ "widget": {
50
+ "name": "float"
51
+ }
52
+ }
53
+ ],
54
+ "outputs": [
55
+ {
56
+ "name": "int",
57
+ "type": "INT",
58
+ "links": [
59
+ 13
60
+ ],
61
+ "shape": 3,
62
+ "slot_index": 0
63
+ }
64
+ ],
65
+ "properties": {
66
+ "Node name for S&R": "> Float to Int"
67
+ },
68
+ "widgets_values": [
69
+ 0,
70
+ "floor"
71
+ ]
72
+ },
73
+ {
74
+ "id": 8,
75
+ "type": "SimpleMath+",
76
+ "pos": [
77
+ 420,
78
+ 560
79
+ ],
80
+ "size": {
81
+ "0": 315,
82
+ "1": 78
83
+ },
84
+ "flags": {},
85
+ "order": 4,
86
+ "mode": 0,
87
+ "inputs": [
88
+ {
89
+ "name": "a",
90
+ "type": "INT,FLOAT",
91
+ "link": 10
92
+ },
93
+ {
94
+ "name": "b",
95
+ "type": "INT,FLOAT",
96
+ "link": 11
97
+ }
98
+ ],
99
+ "outputs": [
100
+ {
101
+ "name": "INT",
102
+ "type": "INT",
103
+ "links": null,
104
+ "shape": 3,
105
+ "slot_index": 0
106
+ },
107
+ {
108
+ "name": "FLOAT",
109
+ "type": "FLOAT",
110
+ "links": [
111
+ 12
112
+ ],
113
+ "shape": 3,
114
+ "slot_index": 1
115
+ }
116
+ ],
117
+ "properties": {
118
+ "Node name for S&R": "SimpleMath+"
119
+ },
120
+ "widgets_values": [
121
+ "a/b"
122
+ ]
123
+ },
124
+ {
125
+ "id": 9,
126
+ "type": "SimpleMath+",
127
+ "pos": [
128
+ 60,
129
+ 560
130
+ ],
131
+ "size": {
132
+ "0": 315,
133
+ "1": 78
134
+ },
135
+ "flags": {},
136
+ "order": 0,
137
+ "mode": 0,
138
+ "inputs": [
139
+ {
140
+ "name": "a",
141
+ "type": "INT,FLOAT",
142
+ "link": null
143
+ },
144
+ {
145
+ "name": "b",
146
+ "type": "INT,FLOAT",
147
+ "link": null
148
+ }
149
+ ],
150
+ "outputs": [
151
+ {
152
+ "name": "INT",
153
+ "type": "INT",
154
+ "links": [
155
+ 11
156
+ ],
157
+ "shape": 3,
158
+ "slot_index": 0
159
+ },
160
+ {
161
+ "name": "FLOAT",
162
+ "type": "FLOAT",
163
+ "links": null,
164
+ "shape": 3
165
+ }
166
+ ],
167
+ "title": "Amount of Images in Input Folder",
168
+ "properties": {
169
+ "Node name for S&R": "SimpleMath+"
170
+ },
171
+ "widgets_values": [
172
+ "62"
173
+ ]
174
+ },
175
+ {
176
+ "id": 4,
177
+ "type": "> Save Image",
178
+ "pos": [
179
+ 1580,
180
+ 100
181
+ ],
182
+ "size": [
183
+ 315,
184
+ 338
185
+ ],
186
+ "flags": {},
187
+ "order": 9,
188
+ "mode": 0,
189
+ "inputs": [
190
+ {
191
+ "name": "images",
192
+ "type": "IMAGE",
193
+ "link": 3
194
+ },
195
+ {
196
+ "name": "filename_opt",
197
+ "type": "STRING",
198
+ "link": 14,
199
+ "widget": {
200
+ "name": "filename_opt"
201
+ }
202
+ },
203
+ {
204
+ "name": "folder",
205
+ "type": "STRING",
206
+ "link": 15,
207
+ "widget": {
208
+ "name": "folder"
209
+ }
210
+ }
211
+ ],
212
+ "properties": {
213
+ "Node name for S&R": "> Save Image"
214
+ },
215
+ "widgets_values": [
216
+ "ComfyUI",
217
+ "myoutputs",
218
+ true,
219
+ ""
220
+ ]
221
+ },
222
+ {
223
+ "id": 1,
224
+ "type": "> Load Image From Folder",
225
+ "pos": [
226
+ 440,
227
+ 100
228
+ ],
229
+ "size": {
230
+ "0": 315,
231
+ "1": 102
232
+ },
233
+ "flags": {},
234
+ "order": 3,
235
+ "mode": 0,
236
+ "inputs": [
237
+ {
238
+ "name": "index",
239
+ "type": "INT",
240
+ "link": 5,
241
+ "widget": {
242
+ "name": "index"
243
+ }
244
+ }
245
+ ],
246
+ "outputs": [
247
+ {
248
+ "name": "image",
249
+ "type": "IMAGE",
250
+ "links": [
251
+ 2,
252
+ 3
253
+ ],
254
+ "shape": 3,
255
+ "slot_index": 0
256
+ },
257
+ {
258
+ "name": "file_name",
259
+ "type": "STRING",
260
+ "links": [
261
+ 14
262
+ ],
263
+ "shape": 3,
264
+ "slot_index": 1
265
+ }
266
+ ],
267
+ "properties": {
268
+ "Node name for S&R": "> Load Image From Folder"
269
+ },
270
+ "widgets_values": [
271
+ "myinputs",
272
+ -1
273
+ ]
274
+ },
275
+ {
276
+ "id": 7,
277
+ "type": "> Int to Text",
278
+ "pos": [
279
+ 855,
280
+ 325
281
+ ],
282
+ "size": [
283
+ 315,
284
+ 106
285
+ ],
286
+ "flags": {},
287
+ "order": 7,
288
+ "mode": 0,
289
+ "inputs": [
290
+ {
291
+ "name": "int",
292
+ "type": "INT",
293
+ "link": 13,
294
+ "widget": {
295
+ "name": "int"
296
+ }
297
+ }
298
+ ],
299
+ "outputs": [
300
+ {
301
+ "name": "text",
302
+ "type": "STRING",
303
+ "links": [
304
+ 8
305
+ ],
306
+ "shape": 3,
307
+ "slot_index": 0
308
+ }
309
+ ],
310
+ "properties": {
311
+ "Node name for S&R": "> Int to Text"
312
+ },
313
+ "widgets_values": [
314
+ 0,
315
+ true,
316
+ 5
317
+ ]
318
+ },
319
+ {
320
+ "id": 6,
321
+ "type": "> Text Combine",
322
+ "pos": [
323
+ 1220,
324
+ 220
325
+ ],
326
+ "size": [
327
+ 315,
328
+ 130
329
+ ],
330
+ "flags": {},
331
+ "order": 8,
332
+ "mode": 0,
333
+ "inputs": [
334
+ {
335
+ "name": "text",
336
+ "type": "STRING",
337
+ "link": 17,
338
+ "widget": {
339
+ "name": "text"
340
+ },
341
+ "slot_index": 0
342
+ },
343
+ {
344
+ "name": "text_append",
345
+ "type": "STRING",
346
+ "link": 8,
347
+ "widget": {
348
+ "name": "text_append"
349
+ }
350
+ }
351
+ ],
352
+ "outputs": [
353
+ {
354
+ "name": "text",
355
+ "type": "STRING",
356
+ "links": [
357
+ 15
358
+ ],
359
+ "shape": 3,
360
+ "slot_index": 0
361
+ }
362
+ ],
363
+ "properties": {
364
+ "Node name for S&R": "> Text Combine"
365
+ },
366
+ "widgets_values": [
367
+ "myoutputs",
368
+ "",
369
+ "_",
370
+ false
371
+ ]
372
+ },
373
+ {
374
+ "id": 11,
375
+ "type": "PrimitiveNode",
376
+ "pos": [
377
+ 850,
378
+ 215
379
+ ],
380
+ "size": [
381
+ 320,
382
+ 60
383
+ ],
384
+ "flags": {},
385
+ "order": 1,
386
+ "mode": 0,
387
+ "outputs": [
388
+ {
389
+ "name": "STRING",
390
+ "type": "STRING",
391
+ "links": [
392
+ 17
393
+ ],
394
+ "widget": {
395
+ "name": "text"
396
+ }
397
+ }
398
+ ],
399
+ "title": "text",
400
+ "properties": {
401
+ "Run widget replace on values": false
402
+ },
403
+ "widgets_values": [
404
+ "myoutputs"
405
+ ]
406
+ },
407
+ {
408
+ "id": 5,
409
+ "type": "> Int",
410
+ "pos": [
411
+ 53,
412
+ 103
413
+ ],
414
+ "size": {
415
+ "0": 315,
416
+ "1": 82
417
+ },
418
+ "flags": {},
419
+ "order": 2,
420
+ "mode": 0,
421
+ "outputs": [
422
+ {
423
+ "name": "int",
424
+ "type": "INT",
425
+ "links": [
426
+ 5,
427
+ 10
428
+ ],
429
+ "shape": 3,
430
+ "slot_index": 0
431
+ }
432
+ ],
433
+ "properties": {
434
+ "Node name for S&R": "> Int"
435
+ },
436
+ "widgets_values": [
437
+ 0,
438
+ "increment"
439
+ ]
440
+ }
441
+ ],
442
+ "links": [
443
+ [
444
+ 2,
445
+ 1,
446
+ 0,
447
+ 3,
448
+ 0,
449
+ "IMAGE"
450
+ ],
451
+ [
452
+ 3,
453
+ 1,
454
+ 0,
455
+ 4,
456
+ 0,
457
+ "IMAGE"
458
+ ],
459
+ [
460
+ 5,
461
+ 5,
462
+ 0,
463
+ 1,
464
+ 0,
465
+ "INT"
466
+ ],
467
+ [
468
+ 8,
469
+ 7,
470
+ 0,
471
+ 6,
472
+ 1,
473
+ "STRING"
474
+ ],
475
+ [
476
+ 10,
477
+ 5,
478
+ 0,
479
+ 8,
480
+ 0,
481
+ "INT,FLOAT"
482
+ ],
483
+ [
484
+ 11,
485
+ 9,
486
+ 0,
487
+ 8,
488
+ 1,
489
+ "INT,FLOAT"
490
+ ],
491
+ [
492
+ 12,
493
+ 8,
494
+ 1,
495
+ 10,
496
+ 0,
497
+ "FLOAT"
498
+ ],
499
+ [
500
+ 13,
501
+ 10,
502
+ 0,
503
+ 7,
504
+ 0,
505
+ "INT"
506
+ ],
507
+ [
508
+ 14,
509
+ 1,
510
+ 1,
511
+ 4,
512
+ 1,
513
+ "STRING"
514
+ ],
515
+ [
516
+ 15,
517
+ 6,
518
+ 0,
519
+ 4,
520
+ 2,
521
+ "STRING"
522
+ ],
523
+ [
524
+ 17,
525
+ 11,
526
+ 0,
527
+ 6,
528
+ 0,
529
+ "STRING"
530
+ ]
531
+ ],
532
+ "groups": [],
533
+ "config": {},
534
+ "extra": {},
535
+ "version": 0.4
536
+ }
ComfyUI/custom_nodes/ComfyUI_yanc/examples/yanc_text_nodes_example.json ADDED
@@ -0,0 +1,822 @@
1
+ {
2
+ "last_node_id": 35,
3
+ "last_link_id": 64,
4
+ "nodes": [
5
+ {
6
+ "id": 8,
7
+ "type": "VAEDecode",
8
+ "pos": [
9
+ 2040,
10
+ 700
11
+ ],
12
+ "size": {
13
+ "0": 210,
14
+ "1": 46
15
+ },
16
+ "flags": {},
17
+ "order": 14,
18
+ "mode": 0,
19
+ "inputs": [
20
+ {
21
+ "name": "samples",
22
+ "type": "LATENT",
23
+ "link": 7
24
+ },
25
+ {
26
+ "name": "vae",
27
+ "type": "VAE",
28
+ "link": 8
29
+ }
30
+ ],
31
+ "outputs": [
32
+ {
33
+ "name": "IMAGE",
34
+ "type": "IMAGE",
35
+ "links": [
36
+ 34
37
+ ],
38
+ "slot_index": 0
39
+ }
40
+ ],
41
+ "properties": {
42
+ "Node name for S&R": "VAEDecode"
43
+ }
44
+ },
45
+ {
46
+ "id": 27,
47
+ "type": "> Save Image",
48
+ "pos": [
49
+ 2300,
50
+ 700
51
+ ],
52
+ "size": [
53
+ 480,
54
+ 620
55
+ ],
56
+ "flags": {},
57
+ "order": 15,
58
+ "mode": 0,
59
+ "inputs": [
60
+ {
61
+ "name": "images",
62
+ "type": "IMAGE",
63
+ "link": 34
64
+ },
65
+ {
66
+ "name": "filename_opt",
67
+ "type": "STRING",
68
+ "link": null,
69
+ "widget": {
70
+ "name": "filename_opt"
71
+ }
72
+ }
73
+ ],
74
+ "properties": {
75
+ "Node name for S&R": "> Save Image"
76
+ },
77
+ "widgets_values": [
78
+ "ComfyUI",
79
+ "yanc_demo",
80
+ true,
81
+ ""
82
+ ]
83
+ },
84
+ {
85
+ "id": 29,
86
+ "type": "> Text Pick Random Line",
87
+ "pos": [
88
+ 300,
89
+ 560
90
+ ],
91
+ "size": [
92
+ 315,
93
+ 106
94
+ ],
95
+ "flags": {},
96
+ "order": 5,
97
+ "mode": 0,
98
+ "inputs": [
99
+ {
100
+ "name": "text",
101
+ "type": "STRING",
102
+ "link": 39,
103
+ "widget": {
104
+ "name": "text"
105
+ }
106
+ }
107
+ ],
108
+ "outputs": [
109
+ {
110
+ "name": "text",
111
+ "type": "STRING",
112
+ "links": [
113
+ 42
114
+ ],
115
+ "shape": 3,
116
+ "slot_index": 0
117
+ }
118
+ ],
119
+ "properties": {
120
+ "Node name for S&R": "> Text Pick Random Line"
121
+ },
122
+ "widgets_values": [
123
+ "",
124
+ 529102614921446,
125
+ "randomize"
126
+ ]
127
+ },
128
+ {
129
+ "id": 24,
130
+ "type": "> Text Combine",
131
+ "pos": [
132
+ 980,
133
+ 540
134
+ ],
135
+ "size": [
136
+ 210,
137
+ 102
138
+ ],
139
+ "flags": {},
140
+ "order": 10,
141
+ "mode": 0,
142
+ "inputs": [
143
+ {
144
+ "name": "text",
145
+ "type": "STRING",
146
+ "link": 38,
147
+ "widget": {
148
+ "name": "text"
149
+ }
150
+ },
151
+ {
152
+ "name": "text_append",
153
+ "type": "STRING",
154
+ "link": 54,
155
+ "widget": {
156
+ "name": "text_append"
157
+ }
158
+ }
159
+ ],
160
+ "outputs": [
161
+ {
162
+ "name": "text",
163
+ "type": "STRING",
164
+ "links": [
165
+ 52,
166
+ 53
167
+ ],
168
+ "shape": 3,
169
+ "slot_index": 0
170
+ }
171
+ ],
172
+ "properties": {
173
+ "Node name for S&R": "> Text Combine"
174
+ },
175
+ "widgets_values": [
176
+ "",
177
+ "",
178
+ "",
179
+ true
180
+ ]
181
+ },
182
+ {
183
+ "id": 23,
184
+ "type": "> Clear Text",
185
+ "pos": [
186
+ 640,
187
+ 800
188
+ ],
189
+ "size": [
190
+ 320,
191
+ 60
192
+ ],
193
+ "flags": {},
194
+ "order": 9,
195
+ "mode": 0,
196
+ "inputs": [
197
+ {
198
+ "name": "text",
199
+ "type": "STRING",
200
+ "link": 27,
201
+ "widget": {
202
+ "name": "text"
203
+ }
204
+ }
205
+ ],
206
+ "outputs": [
207
+ {
208
+ "name": "text",
209
+ "type": "STRING",
210
+ "links": [
211
+ 54
212
+ ],
213
+ "shape": 3,
214
+ "slot_index": 0
215
+ }
216
+ ],
217
+ "properties": {
218
+ "Node name for S&R": "> Clear Text"
219
+ },
220
+ "widgets_values": [
221
+ "",
222
+ 0.25
223
+ ]
224
+ },
225
+ {
226
+ "id": 7,
227
+ "type": "CLIPTextEncode",
228
+ "pos": [
229
+ 1240,
230
+ 900
231
+ ],
232
+ "size": {
233
+ "0": 425.27801513671875,
234
+ "1": 180.6060791015625
235
+ },
236
+ "flags": {},
237
+ "order": 6,
238
+ "mode": 0,
239
+ "inputs": [
240
+ {
241
+ "name": "clip",
242
+ "type": "CLIP",
243
+ "link": 5
244
+ }
245
+ ],
246
+ "outputs": [
247
+ {
248
+ "name": "CONDITIONING",
249
+ "type": "CONDITIONING",
250
+ "links": [
251
+ 6
252
+ ],
253
+ "slot_index": 0
254
+ }
255
+ ],
256
+ "properties": {
257
+ "Node name for S&R": "CLIPTextEncode"
258
+ },
259
+ "widgets_values": [
260
+ "text, watermark, author, signature, blurry, horror"
261
+ ]
262
+ },
263
+ {
264
+ "id": 6,
265
+ "type": "CLIPTextEncode",
266
+ "pos": [
267
+ 1240,
268
+ 700
269
+ ],
270
+ "size": [
271
+ 220,
272
+ 60
273
+ ],
274
+ "flags": {},
275
+ "order": 12,
276
+ "mode": 0,
277
+ "inputs": [
278
+ {
279
+ "name": "clip",
280
+ "type": "CLIP",
281
+ "link": 3
282
+ },
283
+ {
284
+ "name": "text",
285
+ "type": "STRING",
286
+ "link": 53,
287
+ "widget": {
288
+ "name": "text"
289
+ }
290
+ }
291
+ ],
292
+ "outputs": [
293
+ {
294
+ "name": "CONDITIONING",
295
+ "type": "CONDITIONING",
296
+ "links": [
297
+ 4
298
+ ],
299
+ "slot_index": 0
300
+ }
301
+ ],
302
+ "properties": {
303
+ "Node name for S&R": "CLIPTextEncode"
304
+ },
305
+ "widgets_values": [
306
+ "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
307
+ ]
308
+ },
309
+ {
310
+ "id": 25,
311
+ "type": "ConsoleDebug+",
312
+ "pos": [
313
+ 1260,
314
+ 540
315
+ ],
316
+ "size": {
317
+ "0": 315,
318
+ "1": 58
319
+ },
320
+ "flags": {},
321
+ "order": 11,
322
+ "mode": 0,
323
+ "inputs": [
324
+ {
325
+ "name": "value",
326
+ "type": "*",
327
+ "link": 52
328
+ }
329
+ ],
330
+ "properties": {
331
+ "Node name for S&R": "ConsoleDebug+"
332
+ },
333
+ "widgets_values": [
334
+ "Generated Prompt: "
335
+ ]
336
+ },
337
+ {
338
+ "id": 5,
339
+ "type": "EmptyLatentImage",
340
+ "pos": [
341
+ 1300,
342
+ 1120
343
+ ],
344
+ "size": {
345
+ "0": 315,
346
+ "1": 106
347
+ },
348
+ "flags": {},
349
+ "order": 0,
350
+ "mode": 0,
351
+ "outputs": [
352
+ {
353
+ "name": "LATENT",
354
+ "type": "LATENT",
355
+ "links": [
356
+ 2
357
+ ],
358
+ "slot_index": 0
359
+ }
360
+ ],
361
+ "properties": {
362
+ "Node name for S&R": "EmptyLatentImage"
363
+ },
364
+ "widgets_values": [
365
+ 512,
366
+ 512,
367
+ 1
368
+ ]
369
+ },
370
+ {
371
+ "id": 18,
372
+ "type": "> Text Replace",
373
+ "pos": [
374
+ 640,
375
+ 540
376
+ ],
377
+ "size": [
378
+ 315,
379
+ 106
380
+ ],
381
+ "flags": {},
382
+ "order": 8,
383
+ "mode": 0,
384
+ "inputs": [
385
+ {
386
+ "name": "text",
387
+ "type": "STRING",
388
+ "link": 64,
389
+ "widget": {
390
+ "name": "text"
391
+ }
392
+ },
393
+ {
394
+ "name": "replace",
395
+ "type": "STRING",
396
+ "link": 42,
397
+ "widget": {
398
+ "name": "replace"
399
+ }
400
+ }
401
+ ],
402
+ "outputs": [
403
+ {
404
+ "name": "text",
405
+ "type": "STRING",
406
+ "links": [
407
+ 38
408
+ ],
409
+ "shape": 3,
410
+ "slot_index": 0
411
+ }
412
+ ],
413
+ "properties": {
414
+ "Node name for S&R": "> Text Replace"
415
+ },
416
+ "widgets_values": [
417
+ "",
418
+ "_accessory_",
419
+ ""
420
+ ]
421
+ },
422
+ {
423
+ "id": 28,
424
+ "type": "> Text",
425
+ "pos": [
426
+ -120,
427
+ 560
428
+ ],
429
+ "size": {
430
+ "0": 400,
431
+ "1": 200
432
+ },
433
+ "flags": {},
434
+ "order": 1,
435
+ "mode": 0,
436
+ "outputs": [
437
+ {
438
+ "name": "text",
439
+ "type": "STRING",
440
+ "links": [
441
+ 39
442
+ ],
443
+ "shape": 3,
444
+ "slot_index": 0
445
+ }
446
+ ],
447
+ "properties": {
448
+ "Node name for S&R": "> Text"
449
+ },
450
+ "widgets_values": [
451
+ "a hat\na necklace\nglasses\na shirt"
452
+ ]
453
+ },
454
+ {
455
+ "id": 22,
456
+ "type": "> Text Random Weights",
457
+ "pos": [
458
+ 300,
459
+ 800
460
+ ],
461
+ "size": [
462
+ 315,
463
+ 154
464
+ ],
465
+ "flags": {},
466
+ "order": 7,
467
+ "mode": 0,
468
+ "inputs": [
469
+ {
470
+ "name": "text",
471
+ "type": "STRING",
472
+ "link": 58,
473
+ "widget": {
474
+ "name": "text"
475
+ }
476
+ }
477
+ ],
478
+ "outputs": [
479
+ {
480
+ "name": "text",
481
+ "type": "STRING",
482
+ "links": [
483
+ 27
484
+ ],
485
+ "shape": 3,
486
+ "slot_index": 0
487
+ }
488
+ ],
489
+ "properties": {
490
+ "Node name for S&R": "> Text Random Weights"
491
+ },
492
+ "widgets_values": [
493
+ "",
494
+ 0,
495
+ 1.2000000000000002,
496
+ 784130165638034,
497
+ "randomize"
498
+ ]
499
+ },
500
+ {
501
+ "id": 4,
502
+ "type": "CheckpointLoaderSimple",
503
+ "pos": [
504
+ 840,
505
+ 980
506
+ ],
507
+ "size": {
508
+ "0": 315,
509
+ "1": 98
510
+ },
511
+ "flags": {},
512
+ "order": 2,
513
+ "mode": 0,
514
+ "outputs": [
515
+ {
516
+ "name": "MODEL",
517
+ "type": "MODEL",
518
+ "links": [
519
+ 1
520
+ ],
521
+ "slot_index": 0
522
+ },
523
+ {
524
+ "name": "CLIP",
525
+ "type": "CLIP",
526
+ "links": [
527
+ 3,
528
+ 5
529
+ ],
530
+ "slot_index": 1
531
+ },
532
+ {
533
+ "name": "VAE",
534
+ "type": "VAE",
535
+ "links": [
536
+ 8
537
+ ],
538
+ "slot_index": 2
539
+ }
540
+ ],
541
+ "properties": {
542
+ "Node name for S&R": "CheckpointLoaderSimple"
543
+ },
544
+ "widgets_values": [
545
+ "15\\epicrealism_pureEvolutionV5.safetensors"
546
+ ]
547
+ },
548
+ {
549
+ "id": 31,
550
+ "type": "> Text",
551
+ "pos": [
552
+ -120,
553
+ 800
554
+ ],
555
+ "size": {
556
+ "0": 400,
557
+ "1": 200
558
+ },
559
+ "flags": {},
560
+ "order": 3,
561
+ "mode": 0,
562
+ "outputs": [
563
+ {
564
+ "name": "text",
565
+ "type": "STRING",
566
+ "links": [
567
+ 58
568
+ ],
569
+ "shape": 3,
570
+ "slot_index": 0
571
+ }
572
+ ],
573
+ "properties": {
574
+ "Node name for S&R": "> Text"
575
+ },
576
+ "widgets_values": [
577
+ "oil painting\npencil sketch\ncoal sketch\ncomci strip\nmonochrome"
578
+ ]
579
+ },
580
+ {
581
+ "id": 3,
582
+ "type": "KSampler",
583
+ "pos": [
584
+ 1680,
585
+ 700
586
+ ],
587
+ "size": {
588
+ "0": 315,
589
+ "1": 262
590
+ },
591
+ "flags": {},
592
+ "order": 13,
593
+ "mode": 0,
594
+ "inputs": [
595
+ {
596
+ "name": "model",
597
+ "type": "MODEL",
598
+ "link": 1
599
+ },
600
+ {
601
+ "name": "positive",
602
+ "type": "CONDITIONING",
603
+ "link": 4
604
+ },
605
+ {
606
+ "name": "negative",
607
+ "type": "CONDITIONING",
608
+ "link": 6
609
+ },
610
+ {
611
+ "name": "latent_image",
612
+ "type": "LATENT",
613
+ "link": 2
614
+ }
615
+ ],
616
+ "outputs": [
617
+ {
618
+ "name": "LATENT",
619
+ "type": "LATENT",
620
+ "links": [
621
+ 7
622
+ ],
623
+ "slot_index": 0
624
+ }
625
+ ],
626
+ "properties": {
627
+ "Node name for S&R": "KSampler"
628
+ },
629
+ "widgets_values": [
630
+ 0,
631
+ "fixed",
632
+ 20,
633
+ 6,
634
+ "dpmpp_2m",
635
+ "karras",
636
+ 1
637
+ ]
638
+ },
639
+ {
640
+ "id": 33,
641
+ "type": "> Text",
642
+ "pos": [
643
+ -120,
644
+ 320
645
+ ],
646
+ "size": {
647
+ "0": 400,
648
+ "1": 200
649
+ },
650
+ "flags": {},
651
+ "order": 4,
652
+ "mode": 0,
653
+ "outputs": [
654
+ {
655
+ "name": "text",
656
+ "type": "STRING",
657
+ "links": [
658
+ 64
659
+ ],
660
+ "shape": 3,
661
+ "slot_index": 0
662
+ }
663
+ ],
664
+ "properties": {
665
+ "Node name for S&R": "> Text"
666
+ },
667
+ "widgets_values": [
668
+ "painting of a cat wearing _accessory_\n\nhigh detail"
669
+ ]
670
+ }
671
+ ],
672
+ "links": [
673
+ [
674
+ 1,
675
+ 4,
676
+ 0,
677
+ 3,
678
+ 0,
679
+ "MODEL"
680
+ ],
681
+ [
682
+ 2,
683
+ 5,
684
+ 0,
685
+ 3,
686
+ 3,
687
+ "LATENT"
688
+ ],
689
+ [
690
+ 3,
691
+ 4,
692
+ 1,
693
+ 6,
694
+ 0,
695
+ "CLIP"
696
+ ],
697
+ [
698
+ 4,
699
+ 6,
700
+ 0,
701
+ 3,
702
+ 1,
703
+ "CONDITIONING"
704
+ ],
705
+ [
706
+ 5,
707
+ 4,
708
+ 1,
709
+ 7,
710
+ 0,
711
+ "CLIP"
712
+ ],
713
+ [
714
+ 6,
715
+ 7,
716
+ 0,
717
+ 3,
718
+ 2,
719
+ "CONDITIONING"
720
+ ],
721
+ [
722
+ 7,
723
+ 3,
724
+ 0,
725
+ 8,
726
+ 0,
727
+ "LATENT"
728
+ ],
729
+ [
730
+ 8,
731
+ 4,
732
+ 2,
733
+ 8,
734
+ 1,
735
+ "VAE"
736
+ ],
737
+ [
738
+ 27,
739
+ 22,
740
+ 0,
741
+ 23,
742
+ 0,
743
+ "STRING"
744
+ ],
745
+ [
746
+ 34,
747
+ 8,
748
+ 0,
749
+ 27,
750
+ 0,
751
+ "IMAGE"
752
+ ],
753
+ [
754
+ 38,
755
+ 18,
756
+ 0,
757
+ 24,
758
+ 0,
759
+ "STRING"
760
+ ],
761
+ [
762
+ 39,
763
+ 28,
764
+ 0,
765
+ 29,
766
+ 0,
767
+ "STRING"
768
+ ],
769
+ [
770
+ 42,
771
+ 29,
772
+ 0,
773
+ 18,
774
+ 1,
775
+ "STRING"
776
+ ],
777
+ [
778
+ 52,
779
+ 24,
780
+ 0,
781
+ 25,
782
+ 0,
783
+ "*"
784
+ ],
785
+ [
786
+ 53,
787
+ 24,
788
+ 0,
789
+ 6,
790
+ 1,
791
+ "STRING"
792
+ ],
793
+ [
794
+ 54,
795
+ 23,
796
+ 0,
797
+ 24,
798
+ 1,
799
+ "STRING"
800
+ ],
801
+ [
802
+ 58,
803
+ 31,
804
+ 0,
805
+ 22,
806
+ 0,
807
+ "STRING"
808
+ ],
809
+ [
810
+ 64,
811
+ 33,
812
+ 0,
813
+ 18,
814
+ 0,
815
+ "STRING"
816
+ ]
817
+ ],
818
+ "groups": [],
819
+ "config": {},
820
+ "extra": {},
821
+ "version": 0.4
822
+ }
ComfyUI/custom_nodes/ComfyUI_yanc/yanc.py ADDED
@@ -0,0 +1,1594 @@
1
+ import torch
2
+ import torchvision.transforms as T
3
+ import torchvision.transforms.functional as F
4
+ import torch.nn.functional as NNF
6
+ from PIL import Image, ImageSequence, ImageOps
7
+ from PIL.PngImagePlugin import PngInfo
8
+ import random
9
+ import folder_paths
10
+ import hashlib
11
+ import numpy as np
12
+ import os
13
+ from pathlib import Path
14
+ from comfy.cli_args import args
15
+ from comfy_extras import nodes_mask as masks
16
+ import comfy.utils
17
+ import nodes as nodes
18
+ import json
19
+ import math
20
+ import datetime
21
+
22
+ yanc_root_name = "YANC"
23
+ yanc_sub_image = "/😼 Image"
24
+ yanc_sub_text = "/😼 Text"
25
+ yanc_sub_basics = "/😼 Basics"
26
+ yanc_sub_nik = "/😼 Noise Injection Sampler"
27
+ yanc_sub_masking = "/😼 Masking"
28
+ yanc_sub_utils = "/😼 Utils"
29
+ yanc_sub_experimental = "/😼 Experimental"
30
+
31
+ # ------------------------------------------------------------------------------------------------------------------ #
32
+ # Functions #
33
+ # ------------------------------------------------------------------------------------------------------------------ #
34
+
35
+
36
+ def permute_to_image(image):
37
+ image = T.ToTensor()(image).unsqueeze(0)
38
+ return image.permute([0, 2, 3, 1])[:, :, :, :3]
39
+
40
+
41
+ def to_binary_mask(image):
42
+ images_sum = image.sum(axis=3)
43
+ return torch.where(images_sum > 0, 1.0, 0.)
44
+
45
+
46
+ def print_brown(text):
47
+ print("\033[33m" + text + "\033[0m")
48
+
49
+
50
+ def print_cyan(text):
51
+ print("\033[96m" + text + "\033[0m")
52
+
53
+
54
+ def print_green(text):
55
+ print("\033[92m" + text + "\033[0m")
56
+
57
+
58
+ def get_common_aspect_ratios():
59
+ return [
60
+ (4, 3),
61
+ (3, 2),
62
+ (16, 9),
63
+ (1, 1),
64
+ (21, 9),
65
+ (9, 16),
66
+ (3, 4),
67
+ (2, 3),
68
+ (5, 8)
69
+ ]
70
+
71
+
72
+ def get_sdxl_resolutions():
73
+ return [
74
+ ("1:1", (1024, 1024)),
75
+ ("3:4", (896, 1152)),
76
+ ("5:8", (832, 1216)),
77
+ ("9:16", (768, 1344)),
78
+ ("9:21", (640, 1536)),
79
+ ("4:3", (1152, 896)),
80
+ ("3:2", (1216, 832)),
81
+ ("16:9", (1344, 768)),
82
+ ("21:9", (1536, 640))
83
+ ]
84
+
85
+
86
+ def get_15_resolutions():
87
+ return [
88
+ ("1:1", (512, 512)),
89
+ ("2:3", (512, 768)),
90
+ ("3:4", (512, 682)),
91
+ ("3:2", (768, 512)),
92
+ ("16:9", (910, 512)),
93
+ ("1.85:1", (952, 512)),
94
+ ("2:1", (1024, 512)),
95
+ ("2.39:1", (1224, 512))
96
+ ]
97
+
98
+
99
+ def replace_dt_placeholders(string):
100
+ dt = datetime.datetime.now()
101
+
102
+ format_mapping = {
103
+ "%d", # Day
104
+ "%m", # Month
105
+ "%Y", # Year long
106
+ "%y", # Year short
107
+ "%H", # Hour 00 - 23
108
+ "%I", # Hour 00 - 12
109
+ "%p", # AM/PM
110
+ "%M", # Minute
111
+ "%S" # Second
112
+ }
113
+
114
+ for placeholder in format_mapping:
115
+ if placeholder in string:
116
+ string = string.replace(placeholder, dt.strftime(placeholder))
117
+
118
+ return string
119
+
120
+
121
+ def patch(model, multiplier): # RescaleCFG functionality from the ComfyUI nodes
122
+ def rescale_cfg(args):
123
+ cond = args["cond"]
124
+ uncond = args["uncond"]
125
+ cond_scale = args["cond_scale"]
126
+ sigma = args["sigma"]
127
+ sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1))
128
+ x_orig = args["input"]
129
+
130
+ # rescale cfg has to be done on v-pred model output
131
+ x = x_orig / (sigma * sigma + 1.0)
132
+ cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma)
133
+ uncond = ((x - (x_orig - uncond)) *
134
+ (sigma ** 2 + 1.0) ** 0.5) / (sigma)
135
+
136
+ # rescalecfg
137
+ x_cfg = uncond + cond_scale * (cond - uncond)
138
+ ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
139
+ ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)
140
+
141
+ x_rescaled = x_cfg * (ro_pos / ro_cfg)
142
+ x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
143
+
144
+ return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5)
145
+
146
+ m = model.clone()
147
+ m.set_model_sampler_cfg_function(rescale_cfg)
148
+ return (m, )
149
+
150
+
151
+ def blend_images(image1, image2, blend_mode, blend_rate):
152
+ if blend_mode == 'multiply':
153
+ return (1 - blend_rate) * image1 + blend_rate * (image1 * image2)
154
+ elif blend_mode == 'add':
155
+ return (1 - blend_rate) * image1 + blend_rate * (image1 + image2)
156
+ elif blend_mode == 'overlay':
157
+ blended_image = torch.where(
158
+ image1 < 0.5, 2 * image1 * image2, 1 - 2 * (1 - image1) * (1 - image2))
159
+ return (1 - blend_rate) * image1 + blend_rate * blended_image
160
+ elif blend_mode == 'soft light':
161
+ return (1 - blend_rate) * image1 + blend_rate * (soft_light_blend(image1, image2))
162
+ elif blend_mode == 'hard light':
163
+ return (1 - blend_rate) * image1 + blend_rate * (hard_light_blend(image1, image2))
164
+ elif blend_mode == 'lighten':
165
+ return (1 - blend_rate) * image1 + blend_rate * (lighten_blend(image1, image2))
166
+ elif blend_mode == 'darken':
167
+ return (1 - blend_rate) * image1 + blend_rate * (darken_blend(image1, image2))
168
+ else:
169
+ raise ValueError("Unsupported blend mode")
170
+
171
+
172
+ def soft_light_blend(base, blend):
173
+ return 2 * base * blend + base**2 * (1 - 2 * blend)
174
+
175
+
176
+ def hard_light_blend(base, blend):
177
+ return 2 * base * blend + (1 - 2 * base) * (1 - blend)
178
+
179
+
180
+ def lighten_blend(base, blend):
181
+ return torch.max(base, blend)
182
+
183
+
184
+ def darken_blend(base, blend):
185
+ return torch.min(base, blend)
186
+
187
+
188
+ # ------------------------------------------------------------------------------------------------------------------ #
189
+ # Comfy classes #
190
+ # ------------------------------------------------------------------------------------------------------------------ #
191
+ class YANCRotateImage:
192
+ def __init__(self):
193
+ pass
194
+
195
+ @classmethod
196
+ def INPUT_TYPES(s):
197
+ return {
198
+ "required": {
199
+ "image": ("IMAGE",),
200
+ "rotation_angle": ("INT", {
201
+ "default": 0,
202
+ "min": -359,
203
+ "max": 359,
204
+ "step": 1,
205
+ "display": "number"})
206
+ },
207
+ }
208
+
209
+ RETURN_TYPES = ("IMAGE", "MASK")
210
+ RETURN_NAMES = ("image", "mask")
211
+
212
+ FUNCTION = "do_it"
213
+
214
+ CATEGORY = yanc_root_name + yanc_sub_image
215
+
216
+ def do_it(self, image, rotation_angle):
217
+ samples = image.movedim(-1, 1)
218
+ height, width = F.get_image_size(samples)
219
+
220
+ rotation_angle = rotation_angle * -1
221
+ rotated_image = F.rotate(samples, angle=rotation_angle, expand=True)
222
+
223
+ empty_mask = Image.new('RGBA', (height, width), color=(255, 255, 255))
224
+ rotated_mask = F.rotate(empty_mask, angle=rotation_angle, expand=True)
225
+
226
+ img_out = rotated_image.movedim(1, -1)
227
+ mask_out = to_binary_mask(permute_to_image(rotated_mask))
228
+
229
+ return (img_out, mask_out)
230
+
231
+ # ------------------------------------------------------------------------------------------------------------------ #
232
+
233
+
234
+ class YANCText:
235
+ def __init__(self):
236
+ pass
237
+
238
+ @classmethod
239
+ def INPUT_TYPES(s):
240
+ return {
241
+ "required": {
242
+ "text": ("STRING", {
243
+ "multiline": True,
244
+ "default": "",
245
+ "dynamicPrompts": True
246
+ }),
247
+ },
248
+ }
249
+
250
+ RETURN_TYPES = ("STRING",)
251
+ RETURN_NAMES = ("text",)
252
+
253
+ FUNCTION = "do_it"
254
+
255
+ CATEGORY = yanc_root_name + yanc_sub_text
256
+
257
+ def do_it(self, text):
258
+ return (text,)
259
+
260
+ # ------------------------------------------------------------------------------------------------------------------ #
261
+
262
+
263
+ class YANCTextCombine:
264
+ def __init__(self):
265
+ pass
266
+
267
+ @classmethod
268
+ def INPUT_TYPES(s):
269
+ return {
270
+ "required": {
271
+ "text": ("STRING", {"forceInput": True}),
272
+ "text_append": ("STRING", {"forceInput": True}),
273
+ "delimiter": ("STRING", {"multiline": False, "default": ", "}),
274
+ "add_empty_line": ("BOOLEAN", {"default": False})
275
+ },
276
+ }
277
+
278
+ RETURN_TYPES = ("STRING",)
279
+ RETURN_NAMES = ("text",)
280
+
281
+ FUNCTION = "do_it"
282
+
283
+ CATEGORY = yanc_root_name + yanc_sub_text
284
+
285
+ def do_it(self, text, text_append, delimiter, add_empty_line):
286
+ if text_append.strip() == "":
287
+ delimiter = ""
288
+
289
+ str_list = [text, text_append]
290
+
291
+ if add_empty_line:
292
+ str_list = [text, "\n\n", text_append]
293
+
294
+ return (delimiter.join(str_list),)
295
+
296
+ # ------------------------------------------------------------------------------------------------------------------ #
297
+
298
+
299
+ class YANCTextPickRandomLine:
300
+ def __init__(self):
301
+ pass
302
+
303
+ @classmethod
304
+ def INPUT_TYPES(s):
305
+ return {
306
+ "required": {
307
+ "text": ("STRING", {"forceInput": True}),
308
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})
309
+ },
310
+ }
311
+
312
+ RETURN_TYPES = ("STRING",)
313
+ RETURN_NAMES = ("text",)
314
+
315
+ FUNCTION = "do_it"
316
+
317
+ CATEGORY = yanc_root_name + yanc_sub_text
318
+
319
+ def do_it(self, text, seed):
320
+ lines = text.splitlines()
321
+ random.seed(seed)
322
+ line = random.choice(lines)
323
+
324
+ return (line,)
325
+
326
+ # ------------------------------------------------------------------------------------------------------------------ #
327
+
328
+
329
+ class YANCClearText:
330
+ def __init__(self):
331
+ pass
332
+
333
+ @classmethod
334
+ def INPUT_TYPES(s):
335
+ return {
336
+ "required": {
337
+ "text": ("STRING", {"forceInput": True}),
338
+ "chance": ("FLOAT", {
339
+ "default": 0.0,
340
+ "min": 0.0,
341
+ "max": 1.0,
342
+ "step": 0.01,
343
+ "round": 0.001,
344
+ "display": "number"}),
345
+ },
346
+ }
347
+
348
+ RETURN_TYPES = ("STRING",)
349
+ RETURN_NAMES = ("text",)
350
+
351
+ FUNCTION = "do_it"
352
+
353
+ CATEGORY = yanc_root_name + yanc_sub_text
354
+
355
+ def do_it(self, text, chance):
356
+ dice = random.uniform(0, 1)
357
+
358
+ if chance > dice:
359
+ text = ""
360
+
361
+ return (text,)
362
+
363
+ @classmethod
364
+ def IS_CHANGED(s, text, chance):
365
+ return s.do_it(s, text, chance)
366
+
367
+ # ------------------------------------------------------------------------------------------------------------------ #
368
+
369
+
370
+ class YANCTextReplace:
371
+ def __init__(self):
372
+ pass
373
+
374
+ @classmethod
375
+ def INPUT_TYPES(s):
376
+ return {
377
+ "required": {
378
+ "text": ("STRING", {"forceInput": True}),
379
+ "find": ("STRING", {
380
+ "multiline": False,
381
+ "Default": "find"
382
+ }),
383
+ "replace": ("STRING", {
384
+ "multiline": False,
385
+ "Default": "replace"
386
+ }),
387
+ },
388
+ }
389
+
390
+ RETURN_TYPES = ("STRING",)
391
+ RETURN_NAMES = ("text",)
392
+
393
+ FUNCTION = "do_it"
394
+
395
+ CATEGORY = yanc_root_name + yanc_sub_text
396
+
397
+ def do_it(self, text, find, replace):
398
+ text = text.replace(find, replace)
399
+
400
+ return (text,)
401
+
402
+ # ------------------------------------------------------------------------------------------------------------------ #
403
+
404
+
405
+ class YANCTextRandomWeights:
406
+ def __init__(self):
407
+ pass
408
+
409
+ @classmethod
410
+ def INPUT_TYPES(s):
411
+ return {
412
+ "required": {
413
+ "text": ("STRING", {"forceInput": True}),
414
+ "min": ("FLOAT", {
415
+ "default": 1.0,
416
+ "min": 0.0,
417
+ "max": 10.0,
418
+ "step": 0.1,
419
+ "round": 0.1,
420
+ "display": "number"}),
421
+ "max": ("FLOAT", {
422
+ "default": 1.0,
423
+ "min": 0.0,
424
+ "max": 10.0,
425
+ "step": 0.1,
426
+ "round": 0.1,
427
+ "display": "number"}),
428
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
429
+ },
430
+ }
431
+
432
+ RETURN_TYPES = ("STRING",)
433
+ RETURN_NAMES = ("text",)
434
+
435
+ FUNCTION = "do_it"
436
+
437
+ CATEGORY = yanc_root_name + yanc_sub_text
438
+
439
+ def do_it(self, text, min, max, seed):
440
+ lines = text.splitlines()
441
+ count = 0
442
+ out = ""
443
+
444
+ random.seed(seed)
445
+
446
+ for line in lines:
447
+ count += 1
448
+ out += "({}:{})".format(line, round(random.uniform(min, max), 1)
449
+ ) + (", " if count < len(lines) else "")
450
+
451
+ return (out,)
452
+
453
+ # ------------------------------------------------------------------------------------------------------------------ #
454
+
455
+
456
+ class YANCLoadImageAndFilename:
457
+ @classmethod
458
+ def INPUT_TYPES(s):
459
+ input_dir = folder_paths.get_input_directory()
460
+ # files = [f for f in os.listdir(input_dir) if os.path.isfile(
461
+ # os.path.join(input_dir, f))]
462
+
463
+ files = []
464
+ for root, dirs, filenames in os.walk(input_dir):
465
+ for filename in filenames:
466
+ full_path = os.path.join(root, filename)
467
+ relative_path = os.path.relpath(full_path, input_dir)
468
+ relative_path = relative_path.replace("\\", "/")
469
+ files.append(relative_path)
470
+
471
+ return {"required":
472
+ {"image": (sorted(files), {"image_upload": True}),
473
+ "strip_extension": ("BOOLEAN", {"default": True})}
474
+ }
475
+
476
+ CATEGORY = yanc_root_name + yanc_sub_image
477
+
478
+ RETURN_TYPES = ("IMAGE", "MASK", "STRING")
479
+ RETURN_NAMES = ("IMAGE", "MASK", "FILENAME")
480
+
481
+ FUNCTION = "do_it"
482
+
483
+ def do_it(self, image, strip_extension):
484
+ image_path = folder_paths.get_annotated_filepath(image)
485
+ img = Image.open(image_path)
486
+ output_images = []
487
+ output_masks = []
488
+ for i in ImageSequence.Iterator(img):
489
+ i = ImageOps.exif_transpose(i)
490
+ if i.mode == 'I':
491
+ i = i.point(lambda i: i * (1 / 255))
492
+ image = i.convert("RGB")
493
+ image = np.array(image).astype(np.float32) / 255.0
494
+ image = torch.from_numpy(image)[None,]
495
+ if 'A' in i.getbands():
496
+ mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
497
+ mask = 1. - torch.from_numpy(mask)
498
+ else:
499
+ mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
500
+ output_images.append(image)
501
+ output_masks.append(mask.unsqueeze(0))
502
+
503
+ if len(output_images) > 1:
504
+ output_image = torch.cat(output_images, dim=0)
505
+ output_mask = torch.cat(output_masks, dim=0)
506
+ else:
507
+ output_image = output_images[0]
508
+ output_mask = output_masks[0]
509
+
510
+ if strip_extension:
511
+ filename = Path(image_path).stem
512
+ else:
513
+ filename = Path(image_path).name
514
+
515
+ return (output_image, output_mask, filename,)
516
+
517
+ @classmethod
518
+ def IS_CHANGED(s, image, strip_extension):
519
+ image_path = folder_paths.get_annotated_filepath(image)
520
+ m = hashlib.sha256()
521
+ with open(image_path, 'rb') as f:
522
+ m.update(f.read())
523
+ return m.digest().hex()
524
+
525
+ @classmethod
526
+ def VALIDATE_INPUTS(s, image, strip_extension):
527
+ if not folder_paths.exists_annotated_filepath(image):
528
+ return "Invalid image file: {}".format(image)
529
+
530
+ return True
531
+
532
+ # ------------------------------------------------------------------------------------------------------------------ #
533
+
534
+
535
+ class YANCSaveImage:
536
+ def __init__(self):
537
+ self.output_dir = folder_paths.get_output_directory()
538
+ self.type = "output"
539
+ self.prefix_append = ""
540
+ self.compress_level = 4
541
+
542
+ @classmethod
543
+ def INPUT_TYPES(s):
544
+ return {"required":
545
+ {"images": ("IMAGE", ),
546
+ "filename_prefix": ("STRING", {"default": "ComfyUI"}),
547
+ "folder": ("STRING", {"default": ""}),
548
+ "overwrite_warning": ("BOOLEAN", {"default": False}),
549
+ "include_metadata": ("BOOLEAN", {"default": True}),
550
+ "extension": (["png", "jpg"],),
551
+ "quality": ("INT", {"default": 95, "min": 0, "max": 100}),
552
+ },
553
+ "optional":
554
+ {"filename_opt": ("STRING", {"forceInput": True})},
555
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
556
+ }
557
+
558
+ RETURN_TYPES = ()
559
+ FUNCTION = "do_it"
560
+
561
+ OUTPUT_NODE = True
562
+
563
+ CATEGORY = yanc_root_name + yanc_sub_image
564
+
565
+ def do_it(self, images, overwrite_warning, include_metadata, extension, quality, filename_opt=None, folder=None, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None,):
566
+
567
+ if folder:
568
+ filename_prefix += self.prefix_append
569
+ filename_prefix = os.sep.join([folder, filename_prefix])
570
+ else:
571
+ filename_prefix += self.prefix_append
572
+
573
+ if "%" in filename_prefix:
574
+ filename_prefix = replace_dt_placeholders(filename_prefix)
575
+
576
+ full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(
577
+ filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
578
+
579
+ results = list()
580
+ for (batch_number, image) in enumerate(images):
581
+ i = 255. * image.cpu().numpy()
582
+ img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
583
+ metadata = None
584
+
585
+ if not filename_opt:
586
+
587
+ filename_with_batch_num = filename.replace(
588
+ "%batch_num%", str(batch_number))
589
+
590
+ counter = 1
591
+
592
+ if os.path.exists(full_output_folder) and os.listdir(full_output_folder):
593
+ filtered_filenames = list(filter(
594
+ lambda filename: filename.startswith(
595
+ filename_with_batch_num + "_")
596
+ and filename[len(filename_with_batch_num) + 1:-4].isdigit(),
597
+ os.listdir(full_output_folder)
598
+ ))
599
+
600
+ if filtered_filenames:
601
+ max_counter = max(
602
+ int(filename[len(filename_with_batch_num) + 1:-4])
603
+ for filename in filtered_filenames
604
+ )
605
+ counter = max_counter + 1
606
+
607
+ file = f"{filename_with_batch_num}_{counter:05}.{extension}"
608
+ else:
609
+ if len(images) == 1:
610
+ file = f"{filename_opt}.{extension}"
611
+ else:
612
+ raise Exception(
613
+ "Multiple images and filename detected: Images will overwrite themselves!")
614
+
615
+ save_path = os.path.join(full_output_folder, file)
616
+
617
+ if os.path.exists(save_path) and overwrite_warning:
618
+ raise Exception("Filename already exists.")
619
+ else:
620
+ if extension == "png":
621
+ if not args.disable_metadata and include_metadata:
622
+ metadata = PngInfo()
623
+ if prompt is not None:
624
+ metadata.add_text("prompt", json.dumps(prompt))
625
+ if extra_pnginfo is not None:
626
+ for x in extra_pnginfo:
627
+ metadata.add_text(x, json.dumps(extra_pnginfo[x]))
628
+
629
+ img.save(save_path, pnginfo=metadata,
630
+ compress_level=self.compress_level)
631
+ elif extension == "jpg":
632
+ if not args.disable_metadata and include_metadata:
633
+ metadata = {}
634
+
635
+ if prompt is not None:
636
+ metadata["prompt"] = prompt
637
+ if extra_pnginfo is not None:
638
+ for key, value in extra_pnginfo.items():
639
+ metadata[key] = value
640
+
641
+ metadata_json = json.dumps(metadata)
642
+ img.info["comment"] = metadata_json
643
+
644
+ img.save(save_path, quality=quality)
645
+
646
+ results.append({
647
+ "filename": file,
648
+ "subfolder": subfolder,
649
+ "type": self.type
650
+ })
651
+
652
+ return {"ui": {"images": results}}
653
+
654
+ # ------------------------------------------------------------------------------------------------------------------ #
655
+
656
+
657
+ class YANCLoadImageFromFolder:
658
+ @classmethod
659
+ def INPUT_TYPES(s):
660
+ return {"required":
661
+ {"image_folder": ("STRING", {"default": ""})
662
+ },
663
+ "optional":
664
+ {"index": ("INT",
665
+ {"default": -1,
666
+ "min": -1,
667
+ "max": 0xffffffffffffffff,
668
+ "forceInput": True})}
669
+ }
670
+
671
+ CATEGORY = yanc_root_name + yanc_sub_image
672
+
673
+ RETURN_TYPES = ("IMAGE", "STRING")
674
+ RETURN_NAMES = ("image", "file_name")
675
+ FUNCTION = "do_it"
676
+
677
+ def do_it(self, image_folder, index=-1):
678
+
679
+ image_path = os.path.join(
680
+ folder_paths.get_input_directory(), image_folder)
681
+
682
+ # Get all files in the directory
683
+ files = os.listdir(image_path)
684
+
685
+ # Filter out only image files
686
+ image_files = [file for file in files if file.endswith(
687
+ ('.jpg', '.jpeg', '.png', '.webp'))]
688
+
689
+ if index != -1:
690
+ print_green("INFO: Index connected.")
691
+
692
+ if index > len(image_files) - 1:
693
+ index = index % len(image_files)
694
+ print_green(
695
+ "INFO: Index too high, falling back to: " + str(index))
696
+
697
+ image_file = image_files[index]
698
+ else:
699
+ print_green("INFO: Picking a random image.")
700
+ image_file = random.choice(image_files)
701
+
702
+ filename = Path(image_file).stem
703
+
704
+ img_path = os.path.join(image_path, image_file)
705
+
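+ # Load the file, honor EXIF rotation, rescale 32-bit integer ("I" mode)
+ # images, and convert to the [1, H, W, C] float tensor ComfyUI expects.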
706
+ img = Image.open(img_path)
707
+ img = ImageOps.exif_transpose(img)
708
+ if img.mode == 'I':
709
+ img = img.point(lambda i: i * (1 / 255))
710
+ output_image = img.convert("RGB")
711
+ output_image = np.array(output_image).astype(np.float32) / 255.0
712
+ output_image = torch.from_numpy(output_image)[None,]
713
+
714
+ return (output_image, filename)
715
+
716
+ @classmethod
717
+ def IS_CHANGED(s, image_folder, index):
718
+ image_path = os.path.join(
719
+ folder_paths.get_input_directory(), image_folder)
720
+ m = hashlib.sha256()
721
+ # Hash the sorted folder listing; opening the directory itself with open() would raise IsADirectoryError.
+ m.update("".join(sorted(os.listdir(image_path))).encode("utf-8"))
722
+ return m.digest().hex()
723
+
724
+ # ------------------------------------------------------------------------------------------------------------------ #
725
+
726
+
727
+ class YANCIntToText:
728
+ @classmethod
729
+ def INPUT_TYPES(s):
730
+ return {"required":
731
+ {"int": ("INT",
732
+ {"default": 0,
733
+ "min": 0,
734
+ "max": 0xffffffffffffffff,
735
+ "forceInput": True}),
736
+ "leading_zeros": ("BOOLEAN", {"default": False}),
737
+ "length": ("INT",
738
+ {"default": 5,
739
+ "min": 0,
740
+ "max": 5})
741
+ }
742
+ }
743
+
744
+ CATEGORY = yanc_root_name + yanc_sub_basics
745
+
746
+ RETURN_TYPES = ("STRING",)
747
+ RETURN_NAMES = ("text",)
748
+ FUNCTION = "do_it"
749
+
750
+ def do_it(self, int, leading_zeros, length):
751
+
752
+ text = str(int)
753
+
754
+ if leading_zeros:
755
+ text = text.zfill(length)
756
+
757
+ return (text,)
758
+
759
+ # ------------------------------------------------------------------------------------------------------------------ #
760
+
761
+
762
+ class YANCInt:
763
+ @classmethod
764
+ def INPUT_TYPES(s):
765
+ return {"required":
766
+ {"seed": ("INT", {"default": 0, "min": 0,
767
+ "max": 0xffffffffffffffff}), }
768
+ }
769
+
770
+ CATEGORY = yanc_root_name + yanc_sub_basics
771
+
772
+ RETURN_TYPES = ("INT",)
773
+ RETURN_NAMES = ("int",)
774
+ FUNCTION = "do_it"
775
+
776
+ def do_it(self, seed):
777
+
778
+ return (seed,)
779
+
780
+ # ------------------------------------------------------------------------------------------------------------------ #
781
+
782
+
783
+ class YANCFloatToInt:
784
+ @classmethod
785
+ def INPUT_TYPES(s):
786
+ return {"required":
787
+ {"float": ("FLOAT", {"forceInput": True}),
788
+ "function": (["round", "floor", "ceil"],)
789
+ }
790
+ }
791
+
792
+ CATEGORY = yanc_root_name + yanc_sub_basics
793
+
794
+ RETURN_TYPES = ("INT",)
795
+ RETURN_NAMES = ("int",)
796
+ FUNCTION = "do_it"
797
+
798
+ def do_it(self, float, function):
799
+
800
+ result = round(float)
801
+
802
+ if function == "floor":
803
+ result = math.floor(float)
804
+ elif function == "ceil":
805
+ result = math.ceil(float)
806
+
807
+ return (int(result),)
808
+
809
+ # ------------------------------------------------------------------------------------------------------------------ #
810
+
811
+
812
+ class YANCScaleImageToSide:
813
+ @classmethod
814
+ def INPUT_TYPES(s):
815
+ return {"required":
816
+ {
817
+ "image": ("IMAGE",),
818
+ "scale_to": ("INT", {"default": 512}),
819
+ "side": (["shortest", "longest", "width", "height"],),
820
+ "interpolation": (["lanczos", "nearest", "bilinear", "bicubic", "area", "nearest-exact"],),
821
+ "modulo": ("INT", {"default": 0})
822
+ },
823
+ "optional":
824
+ {
825
+ "mask_opt": ("MASK",),
826
+ }
827
+ }
828
+
829
+ CATEGORY = yanc_root_name + yanc_sub_image
830
+
831
+ RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "FLOAT",)
832
+ RETURN_NAMES = ("image", "mask", "width", "height", "scale_ratio",)
833
+ FUNCTION = "do_it"
834
+
835
+ def do_it(self, image, scale_to, side, interpolation, modulo, mask_opt=None):
836
+
837
+ image = image.movedim(-1, 1)
838
+
839
+ image_height, image_width = image.shape[-2:]
840
+
841
+ longer_side = "height" if image_height > image_width else "width"
842
+ shorter_side = "height" if image_height < image_width else "width"
843
+
844
+ new_height, new_width, scale_ratio = 0, 0, 0
845
+
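+ # Resolve "shortest"/"longest" to a concrete side, derive the scale ratio
+ # from it, then scale both dimensions and snap them down to the modulo if set.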
846
+ if side == "shortest":
847
+ side = shorter_side
848
+ elif side == "longest":
849
+ side = longer_side
850
+
851
+ if side == "width":
852
+ scale_ratio = scale_to / image_width
853
+ elif side == "height":
854
+ scale_ratio = scale_to / image_height
855
+
856
+ new_height = image_height * scale_ratio
857
+ new_width = image_width * scale_ratio
858
+
859
+ if modulo != 0:
860
+ new_height = new_height - (new_height % modulo)
861
+ new_width = new_width - (new_width % modulo)
862
+
863
+ new_width = int(new_width)
864
+ new_height = int(new_height)
865
+
866
+ image = comfy.utils.common_upscale(image,
867
+ new_width, new_height, interpolation, "center")
868
+
869
+ if mask_opt is not None:
870
+ mask_opt = mask_opt.permute(0, 1, 2)
871
+
872
+ mask_opt = mask_opt.unsqueeze(0)
873
+ mask_opt = NNF.interpolate(mask_opt, size=(
874
+ new_height, new_width), mode='bilinear', align_corners=False)
875
+
876
+ mask_opt = mask_opt.squeeze(0)
877
+ mask_opt = mask_opt.squeeze(0)
878
+
879
+ mask_opt = mask_opt.permute(0, 1)
880
+
881
+ image = image.movedim(1, -1)
882
+
883
+ return (image, mask_opt, new_width, new_height, 1.0/scale_ratio)
884
+
885
+ # ------------------------------------------------------------------------------------------------------------------ #
886
+
887
+
888
+ class YANCResolutionByAspectRatio:
889
+ @classmethod
890
+ def INPUT_TYPES(s):
891
+ return {"required":
892
+ {
893
+ "stable_diffusion": (["1.5", "SDXL"],),
894
+ "image": ("IMAGE",),
895
+ },
896
+ }
897
+
898
+ CATEGORY = yanc_root_name + yanc_sub_image
899
+
900
+ RETURN_TYPES = ("INT", "INT")
901
+ RETURN_NAMES = ("width", "height",)
902
+ FUNCTION = "do_it"
903
+
904
+ def do_it(self, stable_diffusion, image):
905
+
906
+ common_ratios = get_common_aspect_ratios()
907
+ resolutionsSDXL = get_sdxl_resolutions()
908
+ resolutions15 = get_15_resolutions()
909
+
910
+ resolution = resolutions15 if stable_diffusion == "1.5" else resolutionsSDXL
911
+
912
+ image_height, image_width = 0, 0
913
+
914
+ image = image.movedim(-1, 1)
915
+ image_height, image_width = image.shape[-2:]
916
+
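+ # Reduce the resolution by its GCD to get the aspect ratio, then pick the
+ # preset resolution whose ratio is closest (compared via cross-multiplication).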
917
+ gcd = math.gcd(image_width, image_height)
918
+ aspect_ratio = image_width // gcd, image_height // gcd
919
+
920
+ closest_ratio = min(common_ratios, key=lambda x: abs(
921
+ x[1] / x[0] - aspect_ratio[1] / aspect_ratio[0]))
922
+
923
+ closest_resolution = min(resolution, key=lambda res: abs(
924
+ res[1][0] * aspect_ratio[1] - res[1][1] * aspect_ratio[0]))
925
+
926
+ height, width = closest_resolution[1][1], closest_resolution[1][0]
927
+ sd_version = stable_diffusion if stable_diffusion == "SDXL" else "SD 1.5"
928
+
929
+ print_cyan(
930
+ f"Orig. Resolution: {image_width}x{image_height}, Aspect Ratio: {closest_ratio[0]}:{closest_ratio[1]}, Picked resolution: {width}x{height} for {sd_version}")
931
+
932
+ return (width, height,)
933
+
934
+ # ------------------------------------------------------------------------------------------------------------------ #
935
+
936
+
937
+ class YANCNIKSampler:
938
+ @classmethod
939
+ def INPUT_TYPES(s):
940
+ return {"required":
941
+ {"model": ("MODEL",),
942
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
943
+ "steps": ("INT", {"default": 30, "min": 1, "max": 10000}),
944
+ "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
945
+ "cfg_noise": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
946
+ "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
947
+ "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
948
+ "positive": ("CONDITIONING", ),
949
+ "negative": ("CONDITIONING", ),
950
+ "latent_image": ("LATENT", ),
951
+ "noise_strength": ("FLOAT", {"default": 0.5, "min": 0.1, "max": 1.0, "step": 0.1, "round": 0.01}),
952
+ },
953
+ "optional":
954
+ {
955
+ "latent_noise": ("LATENT", ),
956
+ "mask": ("MASK",)
957
+ }
958
+ }
959
+
960
+ RETURN_TYPES = ("LATENT",)
961
+ RETURN_NAME = ("latent",)
962
+ FUNCTION = "do_it"
963
+
964
+ CATEGORY = yanc_root_name + yanc_sub_nik
965
+
966
+ def do_it(self, model, seed, steps, cfg, cfg_noise, sampler_name, scheduler, positive, negative, latent_image, noise_strength, latent_noise, inject_time=0.5, denoise=1.0, mask=None):
967
+
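+ # Two-phase sampling: denoise up to inject_at_step, blend the intermediate
+ # latent with the injected noise latent, then finish the remaining steps with cfg_noise.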
968
+ inject_at_step = round(steps * inject_time)
969
+ print("Inject at step: " + str(inject_at_step))
970
+
971
+ empty_latent = not torch.any(
972
+ latent_image["samples"]).item()
973
+
974
+ print_cyan("Sampling first step image.")
975
+ samples_base_sampler = nodes.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
976
+ denoise=denoise, disable_noise=False, start_step=0, last_step=inject_at_step, force_full_denoise=True)
977
+
978
+ if mask is not None and empty_latent:
979
+ print_cyan(
980
+ "Sampling full image for unmasked areas. You can avoid this step by providing a non empty latent.")
981
+ samples_base_sampler2 = nodes.common_ksampler(
982
+ model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0)
983
+
984
+ samples_base_sampler = samples_base_sampler[0]
985
+
986
+ if mask is not None and not empty_latent:
987
+ samples_base_sampler = latent_image.copy()
988
+ samples_base_sampler["samples"] = latent_image["samples"].clone()
989
+
990
+ samples_out = latent_image.copy()
991
+ samples_out["samples"] = latent_image["samples"].clone()
992
+
993
+ samples_noise = latent_noise["samples"].clone()
995
+
996
+ if samples_base_sampler["samples"].shape != samples_noise.shape:
997
+ # Latent tensors are already [B, C, H, W]; rescale the noise latent to match.
998
+ samples_noise = comfy.utils.common_upscale(
999
+ samples_noise, samples_base_sampler["samples"].shape[3], samples_base_sampler["samples"].shape[2], 'bicubic', crop='center')
1000
1001
+
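+ # Linear blend between the partially denoised latent and the injected noise
+ # latent, weighted by noise_strength.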
1002
+ samples_o = samples_base_sampler["samples"] * (1 - noise_strength)
1003
+ samples_n = samples_noise * noise_strength
1004
+
1005
+ samples_out["samples"] = samples_o + samples_n
1006
+
1007
+ patched_model = patch(model=model, multiplier=0.65)[
1008
+ 0] if round(cfg_noise, 1) > 8.0 else model
1009
+
1010
+ print_cyan("Applying noise.")
1011
+ result = nodes.common_ksampler(patched_model, seed, steps, cfg_noise, sampler_name, scheduler, positive, negative, samples_out,
1012
+ denoise=denoise, disable_noise=False, start_step=inject_at_step, last_step=steps, force_full_denoise=False)[0]
1013
+
1014
+ if mask is not None:
1015
+ print_cyan("Composing...")
1016
+ destination = latent_image["samples"].clone(
1017
+ ) if not empty_latent else samples_base_sampler2[0]["samples"].clone()
1018
+ source = result["samples"]
1019
+ result["samples"] = masks.composite(
1020
+ destination, source, 0, 0, mask, 8)
1021
+
1022
+ return (result,)
1023
+
1024
+ # ------------------------------------------------------------------------------------------------------------------ #
1025
+
1026
+
1027
+ class YANCNoiseFromImage:
1028
+ @classmethod
1029
+ def INPUT_TYPES(s):
1030
+ return {"required":
1031
+ {
1032
+ "image": ("IMAGE",),
1033
+ "magnitude": ("FLOAT", {"default": 210.0, "min": 0.0, "max": 250.0, "step": 0.5, "round": 0.1}),
1034
+ "smoothness": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.5, "round": 0.1}),
1035
+ "noise_intensity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
1036
+ "noise_resize_factor": ("INT", {"default": 2.0, "min": 0, "max": 5.0}),
1037
+ "noise_blend_rate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.005, "round": 0.005}),
1038
+ "saturation_correction": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.5, "step": 0.1, "round": 0.1}),
1039
+ "blend_mode": (["off", "multiply", "add", "overlay", "soft light", "hard light", "lighten", "darken"],),
1040
+ "blend_rate": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
1041
+ },
1042
+ "optional":
1043
+ {
1044
+ "vae_opt": ("VAE", ),
1045
+ }
1046
+ }
1047
+
1048
+ CATEGORY = yanc_root_name + yanc_sub_nik
1049
+
1050
+ RETURN_TYPES = ("IMAGE", "LATENT")
1051
+ RETURN_NAMES = ("image", "latent")
1052
+ FUNCTION = "do_it"
1053
+
1054
+ def do_it(self, image, magnitude, smoothness, noise_intensity, noise_resize_factor, noise_blend_rate, saturation_correction, blend_mode, blend_rate, vae_opt=None):
1055
+
1056
+ # magnitude: The alpha for the elastic transform. Magnitude of displacements.
1057
+ # smoothness: The sigma for the elastic transform. Smoothness of displacements.
1058
+ # noise_intensity: Multiplier for the torch noise.
1059
+ # noise_resize_factor: Multiplier to enlarge the generated noise.
1060
+ # noise_blend_rate: Blend rate between the elastic and the noise.
1061
+ # saturation_correction: Well, for saturation correction.
1062
+ # blend_mode: Different blending modes to blend over batched images.
1063
+ # blend_rate: The strength of the blending.
1064
+
1065
+ noise_blend_rate = noise_blend_rate / 2.25
1066
+
1067
+ if blend_mode != "off":
1068
+ blended_image = image[0:1]
1069
+
1070
+ for i in range(1, image.size(0)):
1071
+ blended_image = blend_images(
1072
+ blended_image, image[i:i+1], blend_mode=blend_mode, blend_rate=blend_rate)
1073
+
1074
+ max_value = torch.max(blended_image)
1075
+ blended_image /= max_value
1076
+
1077
+ image = blended_image
1078
+
1079
+ noisy_image = torch.randn_like(image) * noise_intensity
1080
+ noisy_image = noisy_image.movedim(-1, 1)
1081
+
1082
+ image = image.movedim(-1, 1)
1083
+ image_height, image_width = image.shape[-2:]
1084
+
1085
+ r_mean = torch.mean(image[:, 0, :, :])
1086
+ g_mean = torch.mean(image[:, 1, :, :])
1087
+ b_mean = torch.mean(image[:, 2, :, :])
1088
+
1089
+ fill_value = (r_mean.item(), g_mean.item(), b_mean.item())
1090
+
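+ # Warp the source image with an elastic transform, filling exposed borders
+ # with the image's per-channel mean color.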
1091
+ elastic_transformer = T.ElasticTransform(
1092
+ alpha=float(magnitude), sigma=float(smoothness), fill=fill_value)
1093
+ transformed_img = elastic_transformer(image)
1094
+
1095
+ if saturation_correction != 1.0:
1096
+ transformed_img = F.adjust_saturation(
1097
+ transformed_img, saturation_factor=saturation_correction)
1098
+
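+ # Crop a random region of the raw noise and scale it back up to full size,
+ # coarsening the noise pattern by noise_resize_factor.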
1099
+ if noise_resize_factor > 0:
1100
+ resize_cropper = T.RandomResizedCrop(
1101
+ size=(image_height // noise_resize_factor, image_width // noise_resize_factor))
1102
+
1103
+ resized_crop = resize_cropper(noisy_image)
1104
+
1105
+ resized_img = T.Resize(
1106
+ size=(image_height, image_width))(resized_crop)
1107
+ resized_img = resized_img.movedim(1, -1)
1108
+ else:
1109
+ resized_img = noisy_image.movedim(1, -1)
1110
+
1111
+ if image.size(0) == 1:
1112
+ result = transformed_img.squeeze(0).permute(
1113
+ 1, 2, 0) + (resized_img * noise_blend_rate)
1114
+ else:
1115
+ result = transformed_img.squeeze(0).permute(
1116
+ [0, 2, 3, 1])[:, :, :, :3] + (resized_img * noise_blend_rate)
1117
+
1118
+ latent = None
1119
+
1120
+ if vae_opt is not None:
1121
+ latent = vae_opt.encode(result[:, :, :, :3])
1122
+
1123
+ return (result, {"samples": latent})
1124
+
1125
+ # ------------------------------------------------------------------------------------------------------------------ #
1126
+
1127
+
1128
+ class YANCMaskCurves:
1129
+ @classmethod
1130
+ def INPUT_TYPES(s):
1131
+ return {"required":
1132
+ {
1133
+ "mask": ("MASK",),
1134
+ "low_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
1135
+ "mid_low_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
1136
+ "mid_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
1137
+ "high_value_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
1138
+ "brightness": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 3.0, "step": 0.05, "round": 0.05}),
1139
+ },
1140
+ }
1141
+
1142
+ CATEGORY = yanc_root_name + yanc_sub_masking
1143
+
1144
+ RETURN_TYPES = ("MASK",)
1145
+ RETURN_NAMES = ("mask",)
1146
+ FUNCTION = "do_it"
1147
+
1148
+ def do_it(self, mask, low_value_factor, mid_low_value_factor, mid_value_factor, high_value_factor, brightness):
1149
+
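+ # Split the mask into four brightness bands, scale each band by its factor,
+ # then recombine, apply the brightness gain and clamp to [0, 1].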
1150
+ low_mask = (mask < 0.25).float()
1151
+ mid_low_mask = ((mask >= 0.25) & (mask < 0.5)).float()
1152
+ mid_mask = ((mask >= 0.5) & (mask < 0.75)).float()
1153
+ high_mask = (mask >= 0.75).float()
1154
+
1155
+ low_mask = low_mask * (mask * low_value_factor)
1156
+ mid_low_mask = mid_low_mask * (mask * mid_low_value_factor)
1157
+ mid_mask = mid_mask * (mask * mid_value_factor)
1158
+ high_mask = high_mask * (mask * high_value_factor)
1159
+
1160
+ final_mask = low_mask + mid_low_mask + mid_mask + high_mask
1161
+ final_mask = final_mask * brightness
1162
+ final_mask = torch.clamp(final_mask, 0, 1)
1163
+
1164
+ return (final_mask,)
1165
+
1166
+
1167
+ # ------------------------------------------------------------------------------------------------------------------ #
1168
+
1169
+
1170
+ class YANCLightSourceMask:
1171
+ @classmethod
1172
+ def INPUT_TYPES(s):
1173
+ return {"required":
1174
+ {
1175
+ "image": ("IMAGE",),
1176
+ "threshold": ("FLOAT", {"default": 0.33, "min": 0.0, "max": 1.0, "step": 0.01, "round": 0.01}),
1177
+ },
1178
+ }
1179
+
1180
+ CATEGORY = yanc_root_name + yanc_sub_masking
1181
+
1182
+ RETURN_TYPES = ("MASK",)
1183
+ RETURN_NAMES = ("mask",)
1184
+ FUNCTION = "do_it"
1185
+
1186
+ def do_it(self, image, threshold):
1187
+ batch_size, height, width, _ = image.shape
1188
+
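+ # Blur kernel scales with the image (at least 33 px, ~5% of the shorter side)
+ # and must be odd for GaussianBlur; sigma follows the kernel size.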
1189
+ kernel_size = max(33, int(0.05 * min(height, width)))
1190
+ kernel_size = kernel_size if kernel_size % 2 == 1 else kernel_size + 1
1191
+ sigma = max(1.0, kernel_size / 5.0)
1192
+
1193
+ masks = []
1194
+
1195
+ for i in range(batch_size):
1196
+ mask = image[i].permute(2, 0, 1)
1197
+ mask = torch.mean(mask, dim=0)
1198
+
1199
+ mask = torch.where(mask > threshold, mask * 3.0,
1200
+ torch.tensor(0.0, device=mask.device))
1201
+ mask.clamp_(min=0.0, max=1.0)
1202
+
1203
+ mask = mask.unsqueeze(0).unsqueeze(0)
1204
+
1205
+ blur = T.GaussianBlur(kernel_size=(
1206
+ kernel_size, kernel_size), sigma=(sigma, sigma))
1207
+ mask = blur(mask)
1208
+
1209
+ mask = mask.squeeze(0).squeeze(0)
1210
+ masks.append(mask)
1211
+
1212
+ masks = torch.stack(masks)
1213
+
1214
+ return (masks,)
1215
+
1216
+
1217
+ # ------------------------------------------------------------------------------------------------------------------ #
1218
+
1219
+
1220
+ class YANCNormalMapLighting:
1221
+
1222
+ def __init__(self):
1223
+ pass
1224
+
1225
+ @classmethod
1226
+ def INPUT_TYPES(cls):
1227
+ return {
1228
+ "required": {
1229
+ "diffuse_map": ("IMAGE",),
1230
+ "normal_map": ("IMAGE",),
1231
+ "specular_map": ("IMAGE",),
1232
+ "light_yaw": ("FLOAT", {"default": 45, "min": -180, "max": 180, "step": 1}),
1233
+ "light_pitch": ("FLOAT", {"default": 30, "min": -90, "max": 90, "step": 1}),
1234
+ "specular_power": ("FLOAT", {"default": 32, "min": 1, "max": 200, "step": 1}),
1235
+ "ambient_light": ("FLOAT", {"default": 0.50, "min": 0, "max": 1, "step": 0.01}),
1236
+ "NormalDiffuseStrength": ("FLOAT", {"default": 1.00, "min": 0, "max": 5.0, "step": 0.01}),
1237
+ "SpecularHighlightsStrength": ("FLOAT", {"default": 1.00, "min": 0, "max": 5.0, "step": 0.01}),
1238
+ "TotalGain": ("FLOAT", {"default": 1.00, "min": 0, "max": 2.0, "step": 0.01}),
1239
+ "color": ("INT", {"default": 0xFFFFFF, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
1240
+ },
1241
+ "optional": {
1242
+ "mask": ("MASK",),
1243
+ }
1244
+ }
1245
+
1246
+ RETURN_TYPES = ("IMAGE",)
1247
+
1248
+ FUNCTION = "do_it"
1249
+
1250
+ CATEGORY = yanc_root_name + yanc_sub_image
1251
+
1252
+ def resize_tensor(self, tensor, size):
1253
+ return torch.nn.functional.interpolate(tensor, size=size, mode='bilinear', align_corners=False)
1254
+
1255
+ def do_it(self, diffuse_map, normal_map, specular_map, light_yaw, light_pitch, specular_power, ambient_light, NormalDiffuseStrength, SpecularHighlightsStrength, TotalGain, color, mask=None,):
1256
+ if mask is None:
1257
+ mask = torch.ones_like(diffuse_map[:, :, :, 0])
1258
+
1259
+ diffuse_tensor = diffuse_map.permute(
1260
+ 0, 3, 1, 2)
1261
+ normal_tensor = normal_map.permute(
1262
+ 0, 3, 1, 2) * 2.0 - 1.0
1263
+ specular_tensor = specular_map.permute(
1264
+ 0, 3, 1, 2)
1265
+ mask_tensor = mask.unsqueeze(1)
1266
+ mask_tensor = mask_tensor.expand(-1, 3, -1, -1)
1267
+
1268
+ target_size = (diffuse_tensor.shape[2], diffuse_tensor.shape[3])
1269
+ normal_tensor = self.resize_tensor(normal_tensor, target_size)
1270
+ specular_tensor = self.resize_tensor(specular_tensor, target_size)
1271
+ mask_tensor = self.resize_tensor(mask_tensor, target_size)
1272
+
1273
+ normal_tensor = torch.nn.functional.normalize(normal_tensor, dim=1)
1274
+
1275
+ light_direction = self.euler_to_vector(light_yaw, light_pitch, 0)
1276
+ light_direction = light_direction.view(1, 3, 1, 1)
1277
+
1278
+ camera_direction = self.euler_to_vector(0, 0, 0)
1279
+ camera_direction = camera_direction.view(1, 3, 1, 1)
1280
+
1281
+ light_color = self.int_to_rgb(color)
1282
+ light_color_tensor = torch.tensor(
1283
+ light_color).view(1, 3, 1, 1)
1284
+
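+ # Blinn-Phong style shading: diffuse term from N.L, specular term from
+ # (N.H)^specular_power, both tinted by the selected light color.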
1285
+ diffuse = torch.sum(normal_tensor * light_direction,
1286
+ dim=1, keepdim=True)
1287
+ diffuse = torch.clamp(diffuse, 0, 1)
1288
+ diffuse = diffuse * light_color_tensor
1289
+
1290
+ half_vector = torch.nn.functional.normalize(
1291
+ light_direction + camera_direction, dim=1)
1292
+ specular = torch.sum(normal_tensor * half_vector, dim=1, keepdim=True)
1293
+ specular = torch.pow(torch.clamp(specular, 0, 1), specular_power)
1294
+
1295
+ specular = specular * light_color_tensor
1296
+
1297
+ if diffuse.shape[2:] != target_size:
1298
+ diffuse = self.resize_tensor(diffuse, target_size)
1299
+ if specular.shape[2:] != target_size:
1300
+ specular = self.resize_tensor(specular, target_size)
1301
+
1302
+ output_tensor = (diffuse_tensor * (ambient_light + diffuse * NormalDiffuseStrength) +
1303
+ specular_tensor * specular * SpecularHighlightsStrength) * TotalGain
1304
+
1305
+ output_tensor = output_tensor * mask_tensor + \
1306
+ diffuse_tensor * (1 - mask_tensor)
1307
+
1308
+ output_tensor = output_tensor.permute(
1309
+ 0, 2, 3, 1)
1310
+
1311
+ return (output_tensor,)
1312
+
1313
+ def euler_to_vector(self, yaw, pitch, roll):
1314
+ yaw_rad = np.radians(yaw)
1315
+ pitch_rad = np.radians(pitch)
1316
+ roll_rad = np.radians(roll)
1317
+
1318
+ cos_pitch = np.cos(pitch_rad)
1319
+ sin_pitch = np.sin(pitch_rad)
1320
+ cos_yaw = np.cos(yaw_rad)
1321
+ sin_yaw = np.sin(yaw_rad)
1322
+
1323
+ direction = np.array([
1324
+ sin_yaw * cos_pitch,
1325
+ sin_pitch,
1326
+ cos_pitch * cos_yaw
1327
+ ])
1328
+
1329
+ return torch.from_numpy(direction).float()
1330
+
1331
+ def int_to_rgb(self, color_int):
1332
+ r = (color_int >> 16) & 0xFF
1333
+ g = (color_int >> 8) & 0xFF
1334
+ b = color_int & 0xFF
1335
+
1336
+ return (r / 255.0, g / 255.0, b / 255.0)
1337
+
1338
+
1339
+ # ------------------------------------------------------------------------------------------------------------------ #
1340
+
1341
+
1342
+ class YANCRGBColor:
1343
+ @classmethod
1344
+ def INPUT_TYPES(s):
1345
+ return {"required":
1346
+ {
1347
+ "red": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
1348
+ "green": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
1349
+ "blue": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
1350
+ "plus_minus": ("INT", {"default": 0, "min": -255, "max": 255, "step": 1}),
1351
+ },
1352
+ }
1353
+
1354
+ CATEGORY = yanc_root_name + yanc_sub_utils
1355
+
1356
+ RETURN_TYPES = ("INT", "INT", "INT", "INT", "STRING",)
1357
+ RETURN_NAMES = ("int", "red", "green", "blue", "hex",)
1358
+ FUNCTION = "do_it"
1359
+
1360
+ def do_it(self, red, green, blue, plus_minus):
1361
+ total = red + green + blue
1362
+
1363
+ r_ratio = red / total if total != 0 else 0
1364
+ g_ratio = green / total if total != 0 else 0
1365
+ b_ratio = blue / total if total != 0 else 0
1366
+
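+ # Shift brightness proportionally to each channel's share of the total so the
+ # hue is preserved, limiting the shift so no channel leaves [0, 255].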
1367
+ if plus_minus > 0:
1368
+ max_plus_minus = min((255 - red) / r_ratio if r_ratio > 0 else float('inf'),
1369
+ (255 - green) / g_ratio if g_ratio > 0 else float('inf'),
1370
+ (255 - blue) / b_ratio if b_ratio > 0 else float('inf'))
1371
+ effective_plus_minus = min(plus_minus, max_plus_minus)
1372
+ else:
1373
+ max_plus_minus = min(red / r_ratio if r_ratio > 0 else float('inf'),
1374
+ green / g_ratio if g_ratio > 0 else float('inf'),
1375
+ blue / b_ratio if b_ratio > 0 else float('inf'))
1376
+ effective_plus_minus = max(plus_minus, -max_plus_minus)
1377
+
1378
+ new_r = red + effective_plus_minus * r_ratio
1379
+ new_g = green + effective_plus_minus * g_ratio
1380
+ new_b = blue + effective_plus_minus * b_ratio
1381
+
1382
+ new_r = max(0, min(255, round(new_r)))
1383
+ new_g = max(0, min(255, round(new_g)))
1384
+ new_b = max(0, min(255, round(new_b)))
1385
+
1386
+ color = (new_r << 16) | (new_g << 8) | new_b
1387
+
1388
+ hex_color = "#{:02x}{:02x}{:02x}".format(
1389
+ int(new_r), int(new_g), int(new_b)).upper()
1390
+
1391
+ return (color, new_r, new_g, new_b, hex_color)
1392
+
1393
+
1394
+ # ------------------------------------------------------------------------------------------------------------------ #
1395
+
1396
+
1397
+ class YANCGetMeanColor:
1398
+ @classmethod
1399
+ def INPUT_TYPES(s):
1400
+ return {"required":
1401
+ {
1402
+ "image": ("IMAGE",),
1403
+ "amplify": ("BOOLEAN", {"default": False})
1404
+ },
1405
+ "optional":
1406
+ {
1407
+ "mask_opt": ("MASK",),
1408
+ },
1409
+ }
1410
+
1411
+ CATEGORY = yanc_root_name + yanc_sub_utils
1412
+
1413
+ RETURN_TYPES = ("INT", "INT", "INT", "INT", "STRING")
1414
+ RETURN_NAMES = ("int", "red", "green", "blue", "hex")
1415
+ FUNCTION = "do_it"
1416
+
1417
+ def do_it(self, image, amplify, mask_opt=None):
1418
+ masked_image = image.clone()
1419
+
1420
+ if mask_opt is not None:
1421
+ if mask_opt.shape[1:3] != image.shape[1:3]:
1422
+ raise ValueError(
1423
+ "Mask and image spatial dimensions must match.")
1424
+
1425
+ mask_opt = mask_opt.unsqueeze(-1)
1426
+ masked_image = masked_image * mask_opt
1427
+
1428
+ num_masked_pixels = torch.sum(mask_opt)
1429
+ if num_masked_pixels == 0:
1430
+ raise ValueError(
1431
+ "No masked pixels found in the image. Please set a mask.")
1432
+
1433
+ sum_r = torch.sum(masked_image[:, :, :, 0])
1434
+ sum_g = torch.sum(masked_image[:, :, :, 1])
1435
+ sum_b = torch.sum(masked_image[:, :, :, 2])
1436
+
1437
+ r_mean = sum_r / num_masked_pixels
1438
+ g_mean = sum_g / num_masked_pixels
1439
+ b_mean = sum_b / num_masked_pixels
1440
+ else:
1441
+ r_mean = torch.mean(masked_image[:, :, :, 0])
1442
+ g_mean = torch.mean(masked_image[:, :, :, 1])
1443
+ b_mean = torch.mean(masked_image[:, :, :, 2])
1444
+
1445
+ r_mean_255 = r_mean.item() * 255.0
1446
+ g_mean_255 = g_mean.item() * 255.0
1447
+ b_mean_255 = b_mean.item() * 255.0
1448
+
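+ # "amplify" scales all channels by 255 / highest so the dominant channel
+ # reaches 255, brightening the mean color while keeping the channel ratios.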
1449
+ if amplify:
1450
+ highest_value = max(r_mean_255, g_mean_255, b_mean_255)
1451
+ diff_to_max = 255.0 - highest_value
1452
+
1453
+ amp_factor = 1.0
1454
+
1455
+ r_mean_255 += diff_to_max * amp_factor * \
1456
+ (r_mean_255 / highest_value)
1457
+ g_mean_255 += diff_to_max * amp_factor * \
1458
+ (g_mean_255 / highest_value)
1459
+ b_mean_255 += diff_to_max * amp_factor * \
1460
+ (b_mean_255 / highest_value)
1461
+
1462
+ r_mean_255 = min(max(r_mean_255, 0), 255)
1463
+ g_mean_255 = min(max(g_mean_255, 0), 255)
1464
+ b_mean_255 = min(max(b_mean_255, 0), 255)
1465
+
1466
+ fill_value = (int(r_mean_255) << 16) + \
1467
+ (int(g_mean_255) << 8) + int(b_mean_255)
1468
+
1469
+ hex_color = "#{:02x}{:02x}{:02x}".format(
1470
+ int(r_mean_255), int(g_mean_255), int(b_mean_255)).upper()
1471
+
1472
+ return (fill_value, int(r_mean_255), int(g_mean_255), int(b_mean_255), hex_color,)
1473
+
1474
+
1475
+ # ------------------------------------------------------------------------------------------------------------------ #
1476
+
1477
+
1478
+ class YANCLayerWeights:
1479
+ @classmethod
1480
+ def INPUT_TYPES(s):
1481
+ return {"required":
1482
+ {
1483
+ "layer_0": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1484
+ "layer_1": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1485
+ "layer_2": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1486
+ "layer_3": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1487
+ "layer_4": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1488
+ "layer_5": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1489
+ "layer_6": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1490
+ "layer_7": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1491
+ "layer_8": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1492
+ "layer_9": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1493
+ "layer_10": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1494
+ "layer_11": ("FLOAT", {"default": 0, "min": 0, "max": 10.0, "step": 0.1}),
1495
+ }
1496
+ }
1497
+
1498
+ CATEGORY = yanc_root_name + yanc_sub_experimental
1499
+
1500
+ RETURN_TYPES = ("STRING", "STRING")
1501
+ RETURN_NAMES = ("layer_weights", "help")
1502
+ FUNCTION = "do_it"
1503
+
1504
+ def do_it(self, layer_0, layer_1, layer_2, layer_3, layer_4, layer_5, layer_6, layer_7, layer_8, layer_9, layer_10, layer_11,):
1505
+ result = ""
1506
+
1507
+ result = f"0:{layer_0:g}, 1:{layer_1:g}, 2:{layer_2:g}, 3:{layer_3:g}, 4:{layer_4:g}, 5:{layer_5:g}, 6:{layer_6:g}, 7:{layer_7:g}, 8:{layer_8:g}, 9:{layer_9:g}, 10:{layer_10:g}, 11:{layer_11:g}"
1508
+
1509
+ help = """layer_3: Composition
1510
+ layer_6: Style
1511
+ """
1512
+
1513
+ return (result, help)
1514
+
1515
+
1516
+ # ------------------------------------------------------------------------------------------------------------------ #
1517
+ NODE_CLASS_MAPPINGS = {
1518
+ # Image
1519
+ "> Rotate Image": YANCRotateImage,
1520
+ "> Scale Image to Side": YANCScaleImageToSide,
1521
+ "> Resolution by Aspect Ratio": YANCResolutionByAspectRatio,
1522
+ "> Load Image": YANCLoadImageAndFilename,
1523
+ "> Save Image": YANCSaveImage,
1524
+ "> Load Image From Folder": YANCLoadImageFromFolder,
1525
+ "> Normal Map Lighting": YANCNormalMapLighting,
1526
+
1527
+ # Text
1528
+ "> Text": YANCText,
1529
+ "> Text Combine": YANCTextCombine,
1530
+ "> Text Pick Random Line": YANCTextPickRandomLine,
1531
+ "> Clear Text": YANCClearText,
1532
+ "> Text Replace": YANCTextReplace,
1533
+ "> Text Random Weights": YANCTextRandomWeights,
1534
+
1535
+ # Basics
1536
+ "> Int to Text": YANCIntToText,
1537
+ "> Int": YANCInt,
1538
+ "> Float to Int": YANCFloatToInt,
1539
+
1540
+ # Noise Injection Sampler
1541
+ "> NIKSampler": YANCNIKSampler,
1542
+ "> Noise From Image": YANCNoiseFromImage,
1543
+
1544
+ # Masking
1545
+ "> Mask Curves": YANCMaskCurves,
1546
+ "> Light Source Mask": YANCLightSourceMask,
1547
+
1548
+ # Utils
1549
+ "> Get Mean Color": YANCGetMeanColor,
1550
+ "> RGB Color": YANCRGBColor,
1551
+
1552
+ # Experimental
1553
+ "> Layer Weights (for IPAMS)": YANCLayerWeights,
1554
+ }
1555
+
1556
+ # A dictionary that contains the friendly/humanly readable titles for the nodes
1557
+ NODE_DISPLAY_NAME_MAPPINGS = {
1558
+ # Image
1559
+ "> Rotate Image": "๐Ÿ˜ผ> Rotate Image",
1560
+ "> Scale Image to Side": "๐Ÿ˜ผ> Scale Image to Side",
1561
+ "> Resolution by Aspect Ratio": "๐Ÿ˜ผ> Resolution by Aspect Ratio",
1562
+ "> Load Image": "๐Ÿ˜ผ> Load Image",
1563
+ "> Save Image": "๐Ÿ˜ผ> Save Image",
1564
+ "> Load Image From Folder": "๐Ÿ˜ผ> Load Image From Folder",
1565
+ "> Normal Map Lighting": "๐Ÿ˜ผ> Normal Map Lighting",
1566
+
1567
+ # Text
1568
+ "> Text": "๐Ÿ˜ผ> Text",
1569
+ "> Text Combine": "๐Ÿ˜ผ> Text Combine",
1570
+ "> Text Pick Random Line": "๐Ÿ˜ผ> Text Pick Random Line",
1571
+ "> Clear Text": "๐Ÿ˜ผ> Clear Text",
1572
+ "> Text Replace": "๐Ÿ˜ผ> Text Replace",
1573
+ "> Text Random Weights": "๐Ÿ˜ผ> Text Random Weights",
1574
+
1575
+ # Basics
1576
+ "> Int to Text": "๐Ÿ˜ผ> Int to Text",
1577
+ "> Int": "๐Ÿ˜ผ> Int",
1578
+ "> Float to Int": "๐Ÿ˜ผ> Float to Int",
1579
+
1580
+ # Noise Injection Sampler
1581
+ "> NIKSampler": "๐Ÿ˜ผ> NIKSampler",
1582
+ "> Noise From Image": "๐Ÿ˜ผ> Noise From Image",
1583
+
1584
+ # Masking
1585
+ "> Mask Curves": "๐Ÿ˜ผ> Mask Curves",
1586
+ "> Light Source Mask": "๐Ÿ˜ผ> Light Source Mask",
1587
+
1588
+ # Utils
1589
+ "> Get Mean Color": "๐Ÿ˜ผ> Get Mean Color",
1590
+ "> RGB Color": "๐Ÿ˜ผ> RGB Color",
1591
+
1592
+ # Experimental
1593
+ "> Layer Weights (for IPAMS)": "๐Ÿ˜ผ> Layer Weights (for IPAMS)",
1594
+ }