fix-rotatry-fa-imports (#67)
- fix: rotatry fa imports (bb0e06912f9ef936566778467e6912f75ce4bfa2)
- qwen2_5_vl.py +2 -2
qwen2_5_vl.py
@@ -345,8 +345,8 @@ from transformers.utils import auto_docstring, can_return_tuple, is_torch_flex_a
 
 
 if is_flash_attn_available():
-    from …
-    …
+    from flash_attn.layers.rotary import apply_rotary_emb
+    from flash_attn import flash_attn_varlen_func
 
 if is_flash_attn_available():
     from transformers.modeling_flash_attention_utils import _flash_attention_forward
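For context, a minimal sketch of how the two fixed imports typically work together in a packed (variable-length) attention path: rotary embeddings are applied to q/k with the flash_attn rotary kernel, then attention runs over the packed tokens via flash_attn_varlen_func. The shapes, rotary base, and packing below are illustrative assumptions, not code taken from qwen2_5_vl.py.

import torch
from flash_attn import flash_attn_varlen_func
from flash_attn.layers.rotary import apply_rotary_emb

# Two packed sequences of 64 tokens each (equal lengths keep the sketch simple).
seqs, seqlen, num_heads, head_dim = 2, 64, 8, 64
total = seqs * seqlen
device, dtype = "cuda", torch.float16

q = torch.randn(total, num_heads, head_dim, device=device, dtype=dtype)
k = torch.randn_like(q)
v = torch.randn_like(q)

# Rotary tables: cos/sin of shape (seqlen, head_dim // 2), standard 10000 base.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2, device=device).float() / head_dim))
freqs = torch.outer(torch.arange(seqlen, device=device).float(), inv_freq)
cos, sin = freqs.cos().to(dtype), freqs.sin().to(dtype)

# apply_rotary_emb expects (batch, seqlen, nheads, headdim); because both
# sequences here have the same length, the packed tokens can be viewed as a batch.
q = apply_rotary_emb(q.view(seqs, seqlen, num_heads, head_dim), cos, sin).view(total, num_heads, head_dim)
k = apply_rotary_emb(k.view(seqs, seqlen, num_heads, head_dim), cos, sin).view(total, num_heads, head_dim)

# Cumulative sequence lengths describe the packing for the varlen kernel.
cu_seqlens = torch.tensor([0, seqlen, 2 * seqlen], device=device, dtype=torch.int32)
out = flash_attn_varlen_func(
    q, k, v,
    cu_seqlens_q=cu_seqlens, cu_seqlens_k=cu_seqlens,
    max_seqlen_q=seqlen, max_seqlen_k=seqlen,
)  # out: (total, num_heads, head_dim)

Both imports sit under is_flash_attn_available() because flash_attn is an optional, CUDA-only dependency; importing it unconditionally at module top level would break CPU-only installs of the model file.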