Zhiding committed
Commit 0adc73f · 1 Parent(s): 51a2a40
README.md CHANGED
@@ -42,7 +42,6 @@ We provide the following models:
 | Eagle2-1B | [Qwen2.5-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-0.5B-Instruct) | Siglip | 16K| [🤗 link](https://huggingface.co/NVIDIA/Eagle2-1B)|
 | Eagle2-2B | [Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) | Siglip | 16K| [🤗 link](https://huggingface.co/NVIDIA/Eagle2-2B)|
 | Eagle2-9B | [Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) | Siglip+ConvNext | 16K| [🤗 link](https://huggingface.co/NVIDIA/Eagle2-9B)|
-| Eagle2-32B | [Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct) | Siglip+ConvNext | 16K| [🤗 link](https://huggingface.co/NVIDIA/Eagle2-32B)|
 
 ## Benchmark Results
 | Benchmark | InternVL2-2B | InternVL2.5-2B | InternVL2-4B |Qwen2-VL-2B| Eagle2-2B|
configuration_eagle_chat.py CHANGED
@@ -1,7 +1,7 @@
 # --------------------------------------------------------
 # Eagle2
 # Copyright (c) 2025 NVIDIA
-# Licensed under The MIT License [see LICENSE for details]
+# Licensed under The Apache License [see LICENSE for details]
 # --------------------------------------------------------
 
 import copy
configuration_multi_backbone_channel_concatentation_model.py CHANGED
@@ -1,7 +1,7 @@
 # --------------------------------------------------------
 # Eagle2
 # Copyright (c) 2025 NVIDIA
-# Licensed under The MIT License [see LICENSE for details]
+# Licensed under The Apache License [see LICENSE for details]
 # --------------------------------------------------------
 
 import os
modeling_eagle_chat.py CHANGED
@@ -1,7 +1,7 @@
 # --------------------------------------------------------
 # Eagle2
 # Copyright (c) 2025 NVIDIA
-# Licensed under The MIT License [see LICENSE for details]
+# Licensed under The Apache License [see LICENSE for details]
 # --------------------------------------------------------
 
 import warnings
modeling_siglip.py CHANGED
@@ -1,3 +1,11 @@
+# --------------------------------------------------------
+# Eagle2
+# Copyright (c) 2025 NVIDIA
+# Licensed under The MIT License [see LICENSE for details]
+# Support flash-attention in SigLIP
+# --------------------------------------------------------
+
+
 # coding=utf-8
 # Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
 #
@@ -374,6 +382,10 @@ class SiglipAttention(nn.Module):
         """Input shape: Batch x Time x Channel"""
         if self.use_flash_attn:
             return self._flash_attn(hidden_states)
+        else:
+            return self._vanilla_attn(hidden_states, attention_mask, output_attentions)
+
+    def _vanilla_attn(self, hidden_states, attention_mask=None, output_attentions=False):
         batch_size, q_len, _ = hidden_states.size()
 
         query_states = self.q_proj(hidden_states)
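For context on the second modeling_siglip.py hunk: `forward` previously ran the eager attention body inline after the flash-attention early return; the commit splits that body into a new `_vanilla_attn` method and adds an explicit `else` branch, so `forward` only dispatches between the two paths. Below is a minimal, self-contained sketch of that dispatch pattern. It is not the commit's code: the `TinySiglipAttention` name, the reduced constructor, and the use of `F.scaled_dot_product_attention` for the eager path are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinySiglipAttention(nn.Module):
    """Simplified stand-in for SiglipAttention showing the flash/vanilla dispatch."""

    def __init__(self, hidden_size=64, num_heads=4, use_flash_attn=False):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads
        self.use_flash_attn = use_flash_attn
        self.q_proj = nn.Linear(hidden_size, hidden_size)
        self.k_proj = nn.Linear(hidden_size, hidden_size)
        self.v_proj = nn.Linear(hidden_size, hidden_size)
        self.out_proj = nn.Linear(hidden_size, hidden_size)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        # Same dispatch shape as the commit: flash path if enabled,
        # otherwise fall back to the eager ("vanilla") implementation.
        if self.use_flash_attn:
            return self._flash_attn(hidden_states)
        else:
            return self._vanilla_attn(hidden_states, attention_mask, output_attentions)

    def _flash_attn(self, hidden_states):
        # Stand-in only; the real method calls flash-attention kernels.
        raise NotImplementedError("flash-attn path not shown in this sketch")

    def _vanilla_attn(self, hidden_states, attention_mask=None, output_attentions=False):
        # Plain multi-head attention over the patch sequence.
        batch_size, q_len, _ = hidden_states.size()
        shape = (batch_size, q_len, self.num_heads, self.head_dim)
        q = self.q_proj(hidden_states).view(shape).transpose(1, 2)
        k = self.k_proj(hidden_states).view(shape).transpose(1, 2)
        v = self.v_proj(hidden_states).view(shape).transpose(1, 2)
        attn_out = F.scaled_dot_product_attention(q, k, v, attn_mask=attention_mask)
        attn_out = attn_out.transpose(1, 2).reshape(batch_size, q_len, -1)
        return self.out_proj(attn_out), None  # (output, attn_weights not returned here)


if __name__ == "__main__":
    attn = TinySiglipAttention(use_flash_attn=False)
    out, _ = attn(torch.randn(2, 16, 64))  # routes through _vanilla_attn
    print(out.shape)  # torch.Size([2, 16, 64])
```

In the actual file, `_vanilla_attn` keeps the original projection-and-softmax body that used to follow the flash-attention check; the dispatch in `forward` is the only structural change this hunk makes.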
multi_backbone_channel_concatenation_encoder.py CHANGED
@@ -1,13 +1,15 @@
+# --------------------------------------------------------
+# Eagle2
+# Copyright (c) 2025 NVIDIA
+# Licensed under The Apache License [see LICENSE for details]
+# --------------------------------------------------------
+
 import torch, os
 import torch.nn as nn
 from torch.utils.checkpoint import checkpoint
 
 from .siglip_vision_tower import SiglipVisionTower
 
-# from .hr_clip_encoder import HRCLIPVisionTower
-# from .eva_vit import EVAVITVisionTower
-# from .SAM.modeling_sam import SAMVisionTower
-# from .pix2struct_large import Pix2StructLargeVisionTower
 import torch.nn.functional as F
 from torch.nn.init import trunc_normal_
 from copy import deepcopy
multi_backbone_channel_concatentation_model.py CHANGED
@@ -1,3 +1,9 @@
+# --------------------------------------------------------
+# Eagle2
+# Copyright (c) 2025 NVIDIA
+# Licensed under The Apache License [see LICENSE for details]
+# --------------------------------------------------------
+
 import torch.nn as nn
 
 from transformers.modeling_outputs import BaseModelOutputWithPooling