---
license: mit
pipeline_tag: image-text-to-text
library_name: transformers
base_model:
- OpenGVLab/InternViT-300M-448px
- internlm/internlm2_5-7b-chat
new_version: OpenGVLab/InternVL2_5-8B
base_model_relation: merge
language:
- multilingual
tags:
- internvl
- custom_code
---

# InternOmni

## Quick Start

We provide example code below for running `InternOmni` with the `transformers` library.

> Please use transformers>=4.37.2 to ensure the model works properly.

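If you want to check the installed version at runtime, a minimal assertion like the following works (assuming the `packaging` package is available in your environment):

```python
from packaging import version
import transformers

# Fail early with a clear message if the installed transformers is too old.
assert version.parse(transformers.__version__) >= version.parse("4.37.2"), \
    f"InternOmni requires transformers>=4.37.2, found {transformers.__version__}"
```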
### Inference with Transformers

```python
import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
from transformers.processing_utils import ProcessorMixin
import librosa

class WhisperProcessor(ProcessorMixin):
    # Wraps a WhisperFeatureExtractor and adds the bookkeeping InternOmni expects:
    # the audio length after the CNN front-end and the resulting number of audio tokens.
    attributes = ["feature_extractor"]
    feature_extractor_class = "WhisperFeatureExtractor"

    def __init__(self, feature_extractor):
        super().__init__(feature_extractor)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def get_T_after_cnn(self, L_in, dilation=1):
        # Two 1D convolutions: kernel 3, padding 1, strides 1 and 2.
        for (padding, kernel_size, stride) in [(1, 3, 1), (1, 3, 2)]:
            L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1
            L_out = 1 + L_out // stride
            L_in = L_out
        return L_out

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", 16000)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            L = (audio.shape[0] if audio.shape[0] <= 480000 else 480000)  # max_length < 30s
            mel_len = L // 160
            audio_len_after_cnn = self.get_T_after_cnn(mel_len)
            audio_token_num = (audio_len_after_cnn - 2) // 2 + 1
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
            inputs['audio_len_after_cnn'] = torch.tensor(audio_len_after_cnn, dtype=torch.long)
            inputs['audio_token_num'] = torch.tensor(audio_token_num, dtype=torch.long)
        if text is not None:
            # The text path requires a tokenizer, which is not loaded in the audio-only example below.
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)

def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform

def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

def load_image(image_file, input_size=448, max_num=12):
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values

def load_audio(audio_file, audio_processor):
    audio_values, _ = librosa.load(audio_file, sr=16000)  # sample rate should be 16000

    audio_process_values = audio_processor(audio_values, sampling_rate=16000, return_tensors="pt")
    input_features = audio_process_values['input_features']
    audio_len_after_cnn = audio_process_values['audio_len_after_cnn']
    audio_token_num = audio_process_values['audio_token_num']

    audio_input = {'audio_values': input_features,
                   'audio_len_after_cnn': audio_len_after_cnn,
                   'audio_token_num': audio_token_num,
                   }
    return audio_input

path = 'OpenGVLab/InternOmni'
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
audio_processor = WhisperProcessor.from_pretrained(path)
# set the max number of tiles in `max_num`
pixel_values = load_image('./1.jpg', max_num=12).to(torch.bfloat16).cuda()
audio = load_audio('./1.wav', audio_processor)
generation_config = dict(max_new_tokens=1024, do_sample=True)

# Example prompt (Chinese): "Please transcribe this speech into text and show it as text."
# question = '请将这段语音识别成文字,并以文字形式展示出来。'
response = model.Audio_chat(tokenizer=tokenizer, pixel_values=pixel_values, audio=audio,
                            question=None, generation_config=generation_config)
print(f'Assistant: {response}')
```
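
To pass an explicit prompt instead of `question=None`, supply a string through the `question` argument (a minimal sketch reusing the objects created above; it assumes `Audio_chat` treats `question` as the user prompt, as the commented-out example suggests):

```python
# Reuses model, tokenizer, pixel_values, audio, and generation_config from the example above.
# The prompt asks the model to transcribe the speech into text (English gloss of the Chinese prompt).
question = '请将这段语音识别成文字,并以文字形式展示出来。'
response = model.Audio_chat(tokenizer=tokenizer, pixel_values=pixel_values, audio=audio,
                            question=question, generation_config=generation_config)
print(f'Assistant: {response}')
```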

## License

This project is released under the MIT License. It uses the pre-trained InternVL2-8B model as a component, which is licensed under the Apache License 2.0.

## Citation

If you find this project useful in your research, please consider citing:

```BibTeX
@article{chen2024expanding,
  title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling},
  author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others},
  journal={arXiv preprint arXiv:2412.05271},
  year={2024}
}
@article{gao2024mini,
  title={Mini-InternVL: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance},
  author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others},
  journal={arXiv preprint arXiv:2410.16261},
  year={2024}
}
@article{chen2024far,
  title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites},
  author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others},
  journal={arXiv preprint arXiv:2404.16821},
  year={2024}
}
@inproceedings{chen2024internvl,
  title={InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks},
  author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={24185--24198},
  year={2024}
}
```