update des
- app.py +1 -1
- flux/modules/layers.py +3 -3
app.py
@@ -218,7 +218,7 @@ def create_demo(model_name: str):
         4️⃣ Click the "Edit" button to generate your edited image! <br>
 
         🔔🔔 [<b>Important</b>] Fewer skip steps, "re_init" and "attn_mask" will enhance the editing performance, making the results better aligned with your text, but may lead to discontinuous images. <br>
-
+        If editing still fails with these three, we recommend increasing "attn_scale" to strengthen the attention between the mask region and the background.<br>
     """
     article = r"""
     If our work is helpful, please help to ⭐ the <a href='https://github.com/Xilluill/KV-Edit' target='_blank'>Github Repo</a>. Thanks!
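The new tip corresponds to the "attn_scale" control wired through flux/modules/layers.py below. As a rough sketch of the idea only (build_attn_bias, mask_ids, and n_tokens are hypothetical names invented for this illustration, not KV-Edit's actual API), a scale value can be folded into an additive pre-softmax bias that strengthens attention from mask-region queries to background keys:

# Illustrative sketch, not the repo's code: turn an "attn_scale" value into an
# additive attention bias between mask tokens and background tokens.
import torch

def build_attn_bias(mask_ids: torch.Tensor, n_tokens: int, attn_scale: float) -> torch.Tensor:
    """mask_ids: indices of image tokens inside the edit mask (hypothetical helper)."""
    inside = torch.zeros(n_tokens, dtype=torch.bool)
    inside[mask_ids] = True
    bias = torch.zeros(n_tokens, n_tokens)
    # Raise the pre-softmax logits from mask-region queries to background keys,
    # pulling the edited region toward its surroundings.
    cross = inside.unsqueeze(1) & (~inside).unsqueeze(0)
    bias[cross] = attn_scale
    return bias

# Usage: a float attn_mask passed to SDPA is added to the attention logits.
q = k = v = torch.randn(1, 8, 16, 64)  # (batch, heads, tokens, head_dim)
bias = build_attn_bias(torch.tensor([2, 3, 4]), n_tokens=16, attn_scale=1.5)
out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=bias)

Under this reading, raising attn_scale makes background keys more visible to mask queries, which is why the demo text suggests it when the edit looks disconnected from the rest of the image.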
flux/modules/layers.py
@@ -300,8 +300,7 @@ class DoubleStreamBlock_kv(DoubleStreamBlock):
             if 'attention_mask' in info:
                 attn = attention(q, k, v, pe=pe, attention_mask=info['attention_mask'])
             else:
-
-                attn = attention(q, k, v, pe=pe, pe_q=info['pe_mask'], attention_mask=info['attention_scale'])
+                attn = attention(q, k, v, pe=pe)
 
         # elif feature_k_name in info['feature']:
         else:
@@ -315,7 +314,8 @@ class DoubleStreamBlock_kv(DoubleStreamBlock):
             q = torch.cat((txt_q, img_q), dim=2)
             k = torch.cat((txt_k, source_img_k), dim=2)
             v = torch.cat((txt_v, source_img_v), dim=2)
-            attn = attention(q, k, v, pe=pe, pe_q=info['pe_mask'])
+            # attn = attention(q, k, v, pe=pe, pe_q=info['pe_mask'])
+            attn = attention(q, k, v, pe=pe, pe_q=info['pe_mask'], attention_mask=info['attention_scale'])
 
 
             txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
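For context on the keyword arguments being toggled here: in stock FLUX, attention(q, k, v, pe) applies rotary position embeddings and then scaled-dot-product attention. The sketch below shows one way the extra pe_q / attention_mask parameters could be consumed; the rope math follows the standard FLUX pattern, but the function body is an assumption for illustration, not the repo's implementation:

# Minimal sketch of an attention() helper taking the extra keywords from the
# diff above. apply_rope_single adapts FLUX's apply_rope to one tensor.
import torch
from einops import rearrange

def apply_rope_single(x: torch.Tensor, pe: torch.Tensor) -> torch.Tensor:
    # pe holds (..., seq, head_dim/2, 2, 2) rotation coefficients, as in FLUX.
    x_ = x.float().reshape(*x.shape[:-1], -1, 1, 2)
    out = pe[..., 0] * x_[..., 0] + pe[..., 1] * x_[..., 1]
    return out.reshape(*x.shape).type_as(x)

def attention(q, k, v, pe, pe_q=None, attention_mask=None):
    # If q covers only the masked tokens, it gets its own positional table
    # (pe_q); k/v keep the full-sequence table (pe).
    q = apply_rope_single(q, pe if pe_q is None else pe_q)
    k = apply_rope_single(k, pe)
    # A float attention_mask acts as an additive bias on the attention logits;
    # info['attention_scale'] in the diff would be such a bias.
    x = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=attention_mask)
    return rearrange(x, "B H L D -> B L (H D)")

# Usage with toy shapes: 2 heads, 4 tokens, head_dim 8.
q = torch.randn(1, 2, 4, 8)
kv = torch.randn(1, 2, 4, 8)
pe = torch.randn(1, 1, 4, 4, 2, 2)  # (1, 1, seq, head_dim/2, 2, 2)
out = attention(q, kv, kv, pe)      # -> (1, 4, 16)

A separate pe_q is natural here because q spans only the masked tokens while k and v (the cached source-image keys/values) span the full sequence, so queries and keys need different positional tables.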