Richhiey committed
Commit c7bfc07 · verified · Parent: b3b3061

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. .gitattributes +0 -23
  2. LICENSE +674 -0
  3. README.md +13 -0
  4. config/.DS_Store +0 -0
  5. config/__pycache__/task.cpython-310.pyc +0 -0
  6. config/task.py +119 -0
  7. extras/datasets_eval_testing.py +42 -0
  8. extras/examples/1727.mid +0 -0
  9. extras/examples/805_S00_8s83.mid +0 -0
  10. extras/examples/drum.mid +0 -0
  11. extras/examples/drum_converted.mid +0 -0
  12. extras/examples/piano.mid +0 -0
  13. extras/install_deepspeed.md +28 -0
  14. extras/multi_channel_seqlen_stats.py +177 -0
  15. extras/remove_silence_musicnet_midi.py +32 -0
  16. infer.py +232 -0
  17. install_dataset.py +285 -0
  18. logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/.DS_Store +0 -0
  19. logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/checkpoints/.DS_Store +0 -0
  20. logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_all_eval_final.json +444 -0
  21. logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_mir_st500_voc_debug.json +18 -0
  22. logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_rwc_pop_bass.json +20 -0
  23. logs/2024/notask_all_cross_v6_xk2_amp0811_gm_ext_plus_nops_b72/result_mt3_full_plus_default_all_eval_final.json +654 -0
  24. model/RoPE/RoPE.py +306 -0
  25. model/RoPE/__pycache__/RoPE.cpython-310.pyc +0 -0
  26. model/__pycache__/conformer_helper.cpython-310.pyc +0 -0
  27. model/__pycache__/perceiver_helper.cpython-310.pyc +0 -0
  28. model/__pycache__/perceiver_mod.cpython-310.pyc +0 -0
  29. model/__pycache__/pitchshift_layer.cpython-310.pyc +0 -0
  30. model/__pycache__/positional_encoding.cpython-310.pyc +0 -0
  31. model/__pycache__/t5mod.cpython-310.pyc +0 -0
  32. model/__pycache__/t5mod_helper.cpython-310.pyc +0 -0
  33. model/conv_block.py +217 -0
  34. model/lr_scheduler.py +91 -0
  35. model/optimizers.py +218 -0
  36. model/pitchshift_layer.py +550 -0
  37. model/positional_encoding.py +288 -0
  38. model/spectrogram.py +225 -0
  39. model/t5mod.py +687 -0
  40. model/t5mod_helper.py +133 -0
  41. pytest.ini +2 -0
  42. requirements.txt +16 -0
  43. test.py +183 -0
  44. tests/.DS_Store +0 -0
  45. tests/assert_fns.py +55 -0
  46. tests/audio_test.py +144 -0
  47. tests/event2note_test.py +187 -0
  48. tests/event_codec_test.py +158 -0
  49. tests/metrics_test.py +118 -0
  50. tests/midi_test.py +65 -0
.gitattributes CHANGED
@@ -8,7 +8,6 @@
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
  *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -34,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
 
LICENSE ADDED
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+ gratis or for a fee, you must pass on to the recipients the same
+ freedoms that you received. You must make sure that they, too, receive
+ or can get the source code. And you must show them these terms so they
+ know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+ (1) assert copyright on the software, and (2) offer you this License
+ giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+ that there is no warranty for this free software. For both users' and
+ authors' sake, the GPL requires that modified versions be marked as
+ changed, so that their problems will not be attributed erroneously to
+ authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+ modified versions of the software inside them, although the manufacturer
+ can do so. This is fundamentally incompatible with the aim of
+ protecting users' freedom to change the software. The systematic
+ pattern of such abuse occurs in the area of products for individuals to
+ use, which is precisely where it is most unacceptable. Therefore, we
+ have designed this version of the GPL to prohibit the practice for those
+ products. If such problems arise substantially in other domains, we
+ stand ready to extend this provision to those domains in future versions
+ of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+ States should not allow patents to restrict development and use of
+ software on general-purpose computers, but in those that do, we wish to
+ avoid the special danger that patents applied to a free program could
+ make it effectively proprietary. To prevent this, the GPL assures that
+ patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU Affero General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the special requirements of the GNU Affero General Public License,
+ section 13, concerning interaction through a network will apply to the
+ combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU General Public License from time to time. Such new versions will
+ be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+ into proprietary programs. If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library. If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License. But first, please read
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
README.md ADDED
@@ -0,0 +1,13 @@
+ [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yourmt3-multi-instrument-music-transcription/multi-instrument-music-transcription-on)](https://paperswithcode.com/sota/multi-instrument-music-transcription-on?p=yourmt3-multi-instrument-music-transcription) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yourmt3-multi-instrument-music-transcription/music-transcription-on-slakh2100)](https://paperswithcode.com/sota/music-transcription-on-slakh2100?p=yourmt3-multi-instrument-music-transcription) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yourmt3-multi-instrument-music-transcription/drum-transcription-in-music-dtm-on-enst-drums)](https://paperswithcode.com/sota/drum-transcription-in-music-dtm-on-enst-drums?p=yourmt3-multi-instrument-music-transcription)
+
+ ![yourmt3-low-resolution-color-logo-crop](https://user-images.githubusercontent.com/26891722/204390355-001877a1-d019-46d7-a33c-d3a3adc0743c.png)
+ ### YourMT3: Multi-task and multi-track music transcription for everyone
+
+ ## NEWS
+
+ - **YourMT3+ (MLSP2024)**: [arxiv:2407.04822](https://arxiv.org/abs/2407.04822) | [Colab Demo](https://colab.research.google.com/drive/1AgOVEBfZknDkjmSRA7leoa81a2vrnhBG?usp=sharing) | [Code](https://github.com/mimbres/YourMT3/issues/2#issuecomment-2255643217) (pre-release)
+ - 2024.08 🤗 HuggingFace [Spaces Demo](https://huggingface.co/spaces/mimbres/YourMT3) (Free GPU) is out!
+ - ⭐️ YouTube transcription is working again! (You'll need to authenticate your Google account.)
+
+ ![ymt3p_front_img_hi](https://github.com/mimbres/YourMT3/assets/26891722/2e152609-5131-4787-8422-8c4a8877fee1)
+
config/.DS_Store ADDED
Binary file (6.15 kB).
 
config/__pycache__/task.cpython-310.pyc ADDED
Binary file (1.78 kB).
 
config/task.py ADDED
@@ -0,0 +1,119 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """task.py"""
+ from config.vocabulary import *
+ from utils.note_event_dataclasses import Event
+
+ task_cfg = {
+     "mt3_midi": {  # 11 classes + drum class
+         "name": "mt3_midi",
+         "train_program_vocab": program_vocab_presets["mt3_midi"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+     },
+     "mt3_midi_plus": {  # 11 classes + singing + drum class
+         "name": "mt3_midi_plus",
+         "train_program_vocab": program_vocab_presets["mt3_midi_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+     },
+     "mt3_full": {  # 34 classes (except drums) as in MT3 paper
+         "name": "mt3_full",
+         "train_program_vocab": program_vocab_presets["mt3_full"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+     },
+     "mt3_full_plus": {  # 34 classes (except drums) as in MT3 paper + singing + drum class
+         "name": "mt3_full_plus",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+     },
+     "gm_ext_plus": {  # 13 classes + singing + chorus (except drums)
+         "name": "gm_ext_plus",
+         "train_program_vocab": program_vocab_presets["gm_ext_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+     },
+     "singing_v1": {
+         "name": "singing",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "subtask_tokens": ["task", "transcribe_singing", "transcribe_all"],
+         "ignore_decoding_tokens": ["task", "transcribe_singing", "transcribe_all"],
+         "max_task_token_length": 2,
+         "eval_subtask_prefix": {
+             "default": [Event("transcribe_all", 0), Event("task", 0)],
+             "singing-only": [Event("transcribe_singing", 0),
+                              Event("task", 0)],
+         }
+     },
+     "singing_drum_v1": {
+         "name": "singing_drum",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "subtask_tokens": ["task", "transcribe_singing", "transcribe_drum", "transcribe_all"],
+         "ignore_decoding_tokens": [
+             "task", "transcribe_singing", "transcribe_drum", "transcribe_all"
+         ],
+         "max_task_token_length": 2,
+         "eval_subtask_prefix": {
+             "default": [Event("transcribe_all", 0), Event("task", 0)],
+             "singing-only": [Event("transcribe_singing", 0),
+                              Event("task", 0)],
+             "drum-only": [Event("transcribe_drum", 0),
+                           Event("task", 0)],
+         }
+     },
+     "mc13": {  # multi-channel decoding task of {11 classes + drums + singing}
+         "name": "mc13",
+         "train_program_vocab": program_vocab_presets["gm_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "num_decoding_channels": len(program_vocab_presets["gm_plus"]) + 1,  # 13
+         "max_note_token_length_per_ch": 512,  # multi-channel decoding exclusive parameter
+         "mask_loss_strategy": None,  # multi-channel decoding exclusive parameter
+     },
+     "mc13_256": {  # multi-channel decoding task of {11 classes + drums + singing}
+         "name": "mc13_256",
+         "train_program_vocab": program_vocab_presets["gm_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "num_decoding_channels": len(program_vocab_presets["gm_plus"]) + 1,  # 13
+         "max_note_token_length_per_ch": 256,  # multi-channel decoding exclusive parameter
+         "mask_loss_strategy": None,  # multi-channel decoding exclusive parameter
+     },
+     "mc13_full_plus": {  # multi-channel decoding task of {34 classes + drums + singing & chorus} mapped to 13 channels
+         "name": "mc13_full_plus",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "program2channel_vocab_source": program_vocab_presets["gm_plus"],
+         "num_decoding_channels": 13,
+         "max_note_token_length_per_ch": 512,  # multi-channel decoding exclusive parameter
+         "mask_loss_strategy": None,  # multi-channel decoding exclusive parameter
+     },
+     "mc13_full_plus_256": {  # multi-channel decoding task of {34 classes + drums + singing & chorus} mapped to 13 channels
+         "name": "mc13_full_plus_256",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "program2channel_vocab_source": program_vocab_presets["gm_plus"],
+         "num_decoding_channels": 13,
+         "max_note_token_length_per_ch": 256,  # multi-channel decoding exclusive parameter
+         "mask_loss_strategy": None,  # multi-channel decoding exclusive parameter
+     },
+     "exc_v1": {
+         "name": "exclusive",
+         "train_program_vocab": program_vocab_presets["mt3_full_plus"],
+         "train_drum_vocab": drum_vocab_presets["gm"],
+         "subtask_tokens": ["transcribe", "all", ":"],
+         # "ignore_decoding_tokens": [
+         #     "task", "transcribe_singing", "transcribe_drum", "transcribe_all"
+         # ],
+         # "max_task_token_length": 2,
+         "ignore_decoding_tokens_from_and_to": ["transcribe", ":"],
+         "eval_subtask_prefix": {  # this is the main task that transcribes all instruments
+             "default": [Event("transcribe", 0), Event("all", 0), Event(":", 0)],
+         },
+         "shuffle_subtasks": True,
+     },
+ }
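For orientation, here is a minimal usage sketch of how one of these presets might be looked up (hypothetical; it assumes the repo's `config` package is importable and that training code reads the keys shown above):

from config.task import task_cfg

# Pick the multi-channel preset referenced by the checkpoint logs in this commit.
cfg = task_cfg["mc13_full_plus_256"]
print(cfg["name"])                          # "mc13_full_plus_256"
print(cfg["num_decoding_channels"])         # 13 (size of the gm_plus program vocab + 1)
print(cfg["max_note_token_length_per_ch"])  # 256 note tokens decoded per channel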
extras/datasets_eval_testing.py ADDED
@@ -0,0 +1,42 @@
+ from utils.datasets_eval import AudioFileDataset
+ from torch.utils.data import DataLoader
+ import pytorch_lightning as pl
+
+
+ def test():
+
+     ds = AudioFileDataset()
+     dl = DataLoader(
+         ds, batch_size=None, collate_fn=lambda k: k
+     )  # identity collate_fn is required to use mixed types.
+
+     for x, y in dl:
+         break
+
+     class MyModel(pl.LightningModule):
+
+         def __init__(self, **kwargs):
+             super().__init__()
+
+         def forward(self, x):
+             return x
+
+         def training_step(self, batch, batch_idx):
+             return 0
+
+         def validation_step(self, batch, batch_idx):
+             print(batch)
+             return 0
+
+         def train_dataloader(self):
+             return dl
+
+         def val_dataloader(self):
+             return dl
+
+         def configure_optimizers(self):
+             return None
+
+     model = MyModel()
+     trainer = pl.Trainer()
+     trainer.validate(model)
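The `batch_size=None` plus identity `collate_fn` combination in the test above is what lets the DataLoader pass mixed-type samples through without tensor collation. A standalone sketch of just that trick (hypothetical dataset, no repo dependencies):

from torch.utils.data import DataLoader, Dataset

class MixedTypeDataset(Dataset):
    # Hypothetical dataset yielding (str, dict) pairs, which default_collate would reject.
    def __len__(self):
        return 3

    def __getitem__(self, i):
        return f"audio_{i}.wav", {"n_segments": i}

# batch_size=None disables automatic batching; the identity collate_fn then
# receives each sample individually and returns it unchanged.
dl = DataLoader(MixedTypeDataset(), batch_size=None, collate_fn=lambda k: k)
for path, meta in dl:
    print(path, meta)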
extras/examples/1727.mid ADDED
Binary file (49.2 kB).
 
extras/examples/805_S00_8s83.mid ADDED
Binary file (2.15 kB).
 
extras/examples/drum.mid ADDED
Binary file (10.1 kB).
 
extras/examples/drum_converted.mid ADDED
Binary file (12.8 kB).
 
extras/examples/piano.mid ADDED
Binary file (54 kB).
 
extras/install_deepspeed.md ADDED
@@ -0,0 +1,28 @@
+ """
+
+ # not required on the pytorch 2.0:latest container
+ pip install cupy-cuda11x -f https://pip.cupy.dev/aarch64
+
+ apt-get update
+ apt-get install git
+ apt-get install libaio-dev
+
+ DS_BUILD_OPS=1 pip install deepspeed
+ ds_report
+
+
+ pip install deepspeed==0.7.7
+
+ git clone https://github.com/NVIDIA/apex
+ cd apex
+ pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+
+ In case you have trouble building apex from source, we recommend using the NGC containers,
+ which come with a pre-built PyTorch and apex release:
+
+ nvcr.io/nvidia/pytorch:23.01-py3
+
+ pip install deepspeed; pip install transformers[deepspeed]
+ https://www.deepspeed.ai/docs/config-json/#autotuning
+
+ """
extras/multi_channel_seqlen_stats.py ADDED
@@ -0,0 +1,177 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ from typing import Dict, Tuple
+ from copy import deepcopy
+ from collections import Counter
+ import numpy as np
+ import torch
+ from utils.data_modules import AMTDataModule
+ from utils.task_manager import TaskManager
+ from config.data_presets import data_preset_single_cfg, data_preset_multi_cfg
+ from utils.augment import intra_stem_augment_processor
+
+
+ def get_ds(data_preset_multi: Dict, task_name: str, train_num_samples_per_epoch: int = 90000):
+     tm = TaskManager(task_name=task_name)
+     tm.max_note_token_length_per_ch = 1024  # only to check the max length
+     dm = AMTDataModule(data_preset_multi=data_preset_multi,
+                        task_manager=tm,
+                        train_num_samples_per_epoch=train_num_samples_per_epoch)
+     dm.setup('fit')
+     dl = dm.train_dataloader()
+     ds = dl.flattened[0].dataset
+     return ds
+
+
+ data_preset_multi = data_preset_multi_cfg["all_cross_v6"]
+ task_name = "mc13"  # "mt3_full_plus"
+ ds = get_ds(data_preset_multi, task_name=task_name)
+ ds.random_amp_range = [0.8, 1.1]
+ ds.stem_xaug_policy = {
+     "max_k": 5,
+     "tau": 0.3,
+     "alpha": 1.0,
+     "max_subunit_stems": 12,
+     "no_instr_overlap": True,
+     "no_drum_overlap": True,
+     "uhat_intra_stem_augment": True,
+ }
+
+ length_all = []
+ for i in range(40000):
+     if i % 5000 == 0:
+         print(i)
+     audio_arr, note_token_arr, task_token_arr, pshift_steps = ds.__getitem__(i)
+     lengths = torch.sum(note_token_arr != 0, dim=2).flatten().cpu().tolist()
+     length_all.extend(lengths)
+
+ length_all = np.asarray(length_all)
+
+ # stats
+ empty_sequence = np.sum(length_all < 3) / len(length_all) * 100
+ print("empty_sequences:", f"{empty_sequence:.2f}", "%")
+
+ mean_except_empty = np.mean(length_all[length_all > 2])
+ print("mean_except_empty:", mean_except_empty)
+
+ median_except_empty = np.median(length_all[length_all > 2])
+ print("median_except_empty:", median_except_empty)
+
+ ch_less_than_768 = np.sum(length_all < 768) / len(length_all) * 100
+ print("ch_less_than_768:", f"{ch_less_than_768:.2f}", "%")
+
+ ch_larger_than_512 = np.sum(length_all > 512) / len(length_all) * 100
+ print("ch_larger_than_512:", f"{ch_larger_than_512:.6f}", "%")
+
+ ch_larger_than_256 = np.sum(length_all > 256) / len(length_all) * 100
+ print("ch_larger_than_256:", f"{ch_larger_than_256:.6f}", "%")
+
+ ch_larger_than_128 = np.sum(length_all > 128) / len(length_all) * 100
+ print("ch_larger_than_128:", f"{ch_larger_than_128:.6f}", "%")
+
+ ch_larger_than_64 = np.sum(length_all > 64) / len(length_all) * 100
+ print("ch_larger_than_64:", f"{ch_larger_than_64:.6f}", "%")
+
+ song_length_all = length_all.reshape(-1, 13)
+ song_larger_than_512 = 0
+ song_larger_than_256 = 0
+ song_larger_than_128 = 0
+ song_larger_than_64 = 0
+ for l in song_length_all:
+     if np.sum(l > 512) > 0:
+         song_larger_than_512 += 1
+     if np.sum(l > 256) > 0:
+         song_larger_than_256 += 1
+     if np.sum(l > 128) > 0:
+         song_larger_than_128 += 1
+     if np.sum(l > 64) > 0:
+         song_larger_than_64 += 1
+ num_songs = len(song_length_all)
+ print("song_larger_than_512:", f"{song_larger_than_512/num_songs*100:.4f}", "%")
+ print("song_larger_than_256:", f"{song_larger_than_256/num_songs*100:.4f}", "%")
+ print("song_larger_than_128:", f"{song_larger_than_128/num_songs*100:.4f}", "%")
+ print("song_larger_than_64:", f"{song_larger_than_64/num_songs*100:.4f}", "%")
+
+ instr_dict = {
+     0: "Piano",
+     1: "Chromatic Percussion",
+     2: "Organ",
+     3: "Guitar",
+     4: "Bass",
+     5: "Strings + Ensemble",
+     6: "Brass",
+     7: "Reed",
+     8: "Pipe",
+     9: "Synth Lead",
+     10: "Synth Pad",
+     11: "Singing",
+     12: "Drums",
+ }
+ cnt_larger_than_512 = Counter()
+ for i in np.where(length_all > 512)[0] % 13:
+     cnt_larger_than_512[i] += 1
+ print("larger_than_512:")
+ for k, v in cnt_larger_than_512.items():
+     print(f"  - {instr_dict[k]}: {v}")
+
+ cnt_larger_than_256 = Counter()
+ for i in np.where(length_all > 256)[0] % 13:
+     cnt_larger_than_256[i] += 1
+ print("larger_than_256:")
+ for k, v in cnt_larger_than_256.items():
+     print(f"  - {instr_dict[k]}: {v}")
+
+ cnt_larger_than_128 = Counter()
+ for i in np.where(length_all > 128)[0] % 13:
+     cnt_larger_than_128[i] += 1
+ print("larger_than_128:")
+ for k, v in cnt_larger_than_128.items():
+     print(f"  - {instr_dict[k]}: {v}")
+ """
+ empty_sequences: 91.06 %
+ mean_except_empty: 36.68976799156269
+ median_except_empty: 31.0
+ ch_less_than_768: 100.00 %
+ ch_larger_than_512: 0.000158 %
+ ch_larger_than_256: 0.015132 %
+ ch_larger_than_128: 0.192061 %
+ ch_larger_than_64: 0.661260 %
+ song_larger_than_512: 0.0021 %
+ song_larger_than_256: 0.1926 %
+ song_larger_than_128: 2.2280 %
+ song_larger_than_64: 6.1033 %
+
+ larger_than_512:
+   - Guitar: 7
+   - Strings + Ensemble: 3
+ larger_than_256:
+   - Piano: 177
+   - Guitar: 680
+   - Strings + Ensemble: 79
+   - Organ: 2
+   - Chromatic Percussion: 11
160
+ - Bass: 1
161
+ - Synth Lead: 2
162
+ - Brass: 1
163
+ - Reed: 5
164
+ larger_than_128:
165
+ - Guitar: 4711
166
+ - Strings + Ensemble: 1280
167
+ - Piano: 5548
168
+ - Bass: 211
169
+ - Synth Pad: 22
170
+ - Pipe: 18
171
+ - Chromatic Percussion: 55
172
+ - Synth Lead: 22
173
+ - Organ: 75
174
+ - Reed: 161
175
+ - Brass: 45
176
+ - Drums: 11
177
+ """
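The measurement above boils down to counting non-pad (non-zero) tokens per decoding channel; a self-contained sketch of that step (the `(batch, 13, seq_len)` shape is an assumption mirroring the 13 channels listed in `instr_dict`):

```python
# Sketch: per-channel token-length counting, mirroring
# `torch.sum(note_token_arr != 0, dim=2)` in the script above.
# The (batch, 13, seq_len) shape is an assumption based on the
# 13 decoding channels in `instr_dict`.
import torch

note_token_arr = torch.zeros(4, 13, 1024, dtype=torch.long)  # all padding
note_token_arr[:, 3, :200] = 1  # pretend channel 3 (Guitar) holds 200 tokens

lengths = torch.sum(note_token_arr != 0, dim=2)  # (batch, 13)
print(lengths[0].tolist())  # [0, 0, 0, 200, 0, ..., 0]
```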
extras/remove_silence_musicnet_midi.py ADDED
@@ -0,0 +1,32 @@
1
+ import os
2
+ import glob
3
+
4
+ from utils.midi import midi2note
5
+ from utils.note2event import note2note_event
6
+ from utils.note_event_dataclasses import Note
7
+ from utils.note_event_dataclasses import NoteEvent
8
+ from utils.midi import note_event2midi
9
+
10
+ data_home = '../../data'
11
+ dataset_name = 'musicnet'
12
+ base_dir = os.path.join(data_home, dataset_name + '_yourmt3_16k')
13
+ mid_pattern = os.path.join(base_dir, '*_midi', '*.mid')
14
+ mid_files = glob.glob(mid_pattern, recursive=True)
15
+
16
+ for mid_file in mid_files:
17
+ notes, _ = midi2note(mid_file)
18
+ first_onset_time = notes[0].onset
19
+ fixed_notes = []
20
+ for note in notes:
21
+ fixed_notes.append(
22
+ Note(
23
+ is_drum=note.is_drum,
24
+ program=note.program,
25
+ onset=note.onset - first_onset_time,
26
+ offset=note.offset - first_onset_time,
27
+ pitch=note.pitch,
28
+ velocity=note.velocity))
29
+ assert len(notes) == len(fixed_notes)
30
+ fixed_note_events = note2note_event(fixed_notes, return_activity=False)
31
+ note_event2midi(fixed_note_events, mid_file)
32
+ print(f'Overwriting {mid_file}')
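The transformation itself is just a global time shift; here is a minimal self-contained sketch with a stand-in `Note` dataclass (the real one lives in `utils.note_event_dataclasses`):

```python
# Sketch: shift all notes so the first onset lands at t=0, as the loop
# above does. `Note` is a minimal stand-in for the project's dataclass.
from dataclasses import dataclass, replace
from typing import List

@dataclass
class Note:
    is_drum: bool
    program: int
    onset: float
    offset: float
    pitch: int
    velocity: int

def remove_leading_silence(notes: List[Note]) -> List[Note]:
    t0 = notes[0].onset  # assumes notes are sorted by onset, as midi2note returns
    return [replace(n, onset=n.onset - t0, offset=n.offset - t0) for n in notes]

notes = [Note(False, 0, 1.5, 2.0, 60, 90), Note(False, 0, 2.5, 3.0, 64, 90)]
print(remove_leading_silence(notes)[0].onset)  # 0.0
```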
infer.py ADDED
@@ -0,0 +1,232 @@
1
+ # @title Model helper
2
+ import os
3
+ from collections import Counter
4
+ import argparse
5
+ import torch
6
+ import numpy as np
7
+ from typing import Literal, Dict
8
+ import torchaudio
9
+
10
+ from model.init_train import initialize_trainer, update_config
11
+ from utils.task_manager import TaskManager
12
+ from config.vocabulary import drum_vocab_presets
13
+ from utils.utils import str2bool
14
+ from utils.utils import Timer
15
+ from utils.audio import slice_padded_array
16
+ from utils.note2event import mix_notes
17
+ from utils.event2note import merge_zipped_note_events_and_ties_to_notes
18
+ from utils.utils import write_model_output_as_midi, write_err_cnt_as_json
19
+ from model.ymt3 import YourMT3
20
+
21
+ def load_model_checkpoint(args=None):
22
+ parser = argparse.ArgumentParser(description="YourMT3")
23
+ # General
24
+ parser.add_argument('exp_id', type=str, help='A unique identifier for the experiment, used to resume training. The "@" symbol can be used to load a specific checkpoint.')
25
+ parser.add_argument('-p', '--project', type=str, default='ymt3', help='project name')
26
+ parser.add_argument('-ac', '--audio-codec', type=str, default=None, help='audio codec (default=None). {"spec", "melspec"}. If None, default value defined in config.py will be used.')
27
+ parser.add_argument('-hop', '--hop-length', type=int, default=None, help='hop length in frames (default=None). {128, 300}: 128 for MT3, 300 for PerceiverTF. If None, default value defined in config.py will be used.')
28
+ parser.add_argument('-nmel', '--n-mels', type=int, default=None, help='number of mel bins (default=None). If None, default value defined in config.py will be used.')
29
+ parser.add_argument('-if', '--input-frames', type=int, default=None, help='number of audio frames for input segment (default=None). If None, default value defined in config.py will be used.')
30
+ # Model configurations
31
+ parser.add_argument('-sqr', '--sca-use-query-residual', type=str2bool, default=None, help='sca use query residual flag. Default follows config.py')
32
+ parser.add_argument('-enc', '--encoder-type', type=str, default=None, help="Encoder type. 't5' or 'perceiver-tf' or 'conformer'. Default is 't5', following config.py.")
33
+ parser.add_argument('-dec', '--decoder-type', type=str, default=None, help="Decoder type. 't5' or 'multi-t5'. Default is 't5', following config.py.")
34
+ parser.add_argument('-preenc', '--pre-encoder-type', type=str, default='default', help="Pre-encoder type. None or 'conv' or 'default'. By default, t5_enc:None, perceiver_tf_enc:conv, conformer:None")
35
+ parser.add_argument('-predec', '--pre-decoder-type', type=str, default='default', help="Pre-decoder type. {None, 'linear', 'conv1', 'mlp', 'group_linear'} or 'default'. Default is {'t5': None, 'perceiver-tf': 'linear', 'conformer': None}.")
36
+ parser.add_argument('-cout', '--conv-out-channels', type=int, default=None, help='Number of filters for pre-encoder conv layer. Default follows "model_cfg" of config.py.')
37
+ parser.add_argument('-tenc', '--task-cond-encoder', type=str2bool, default=True, help='task conditional encoder (default=True). True or False')
38
+ parser.add_argument('-tdec', '--task-cond-decoder', type=str2bool, default=True, help='task conditional decoder (default=True). True or False')
39
+ parser.add_argument('-df', '--d-feat', type=int, default=None, help='Audio feature will be projected to this dimension for Q,K,V of T5 or K,V of Perceiver (default=None). If None, default value defined in config.py will be used.')
40
+ parser.add_argument('-pt', '--pretrained', type=str2bool, default=False, help='pretrained T5(default=False). True or False')
41
+ parser.add_argument('-b', '--base-name', type=str, default="google/t5-v1_1-small", help='base model name (default="google/t5-v1_1-small")')
42
+ parser.add_argument('-epe', '--encoder-position-encoding-type', type=str, default='default', help="Positional encoding type of encoder. By default, pre-defined PE for T5 or Perceiver-TF encoder in config.py. For T5: {'sinusoidal', 'trainable'}, conformer: {'rotary', 'trainable'}, Perceiver-TF: {'trainable', 'rope', 'alibi', 'alibit', 'None', '0', 'none', 'tkd', 'td', 'tk', 'kdt'}.")
43
+ parser.add_argument('-dpe', '--decoder-position-encoding-type', type=str, default='default', help="Positional encoding type of decoder. By default, pre-defined PE for T5 in config.py. {'sinusoidal', 'trainable'}.")
44
+ parser.add_argument('-twe', '--tie-word-embedding', type=str2bool, default=None, help='tie word embedding (default=None). If None, default value defined in config.py will be used.')
45
+ parser.add_argument('-el', '--event-length', type=int, default=None, help='event length (default=None). If None, default value defined in model cfg of config.py will be used.')
46
+ # Perceiver-TF configurations
47
+ parser.add_argument('-dl', '--d-latent', type=int, default=None, help='Latent dimension of Perceiver. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
48
+ parser.add_argument('-nl', '--num-latents', type=int, default=None, help='Number of latents of Perceiver. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
49
+ parser.add_argument('-dpm', '--perceiver-tf-d-model', type=int, default=None, help='Perceiver-TF d_model (default=None). If None, default value defined in config.py will be used.')
50
+ parser.add_argument('-npb', '--num-perceiver-tf-blocks', type=int, default=None, help='Number of blocks of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py.')
51
+ parser.add_argument('-npl', '--num-perceiver-tf-local-transformers-per-block', type=int, default=None, help='Number of local layers per block of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
52
+ parser.add_argument('-npt', '--num-perceiver-tf-temporal-transformers-per-block', type=int, default=None, help='Number of temporal layers per block of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
53
+ parser.add_argument('-atc', '--attention-to-channel', type=str2bool, default=None, help='Attention to channel flag of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
54
+ parser.add_argument('-ln', '--layer-norm-type', type=str, default=None, help='Layer normalization type (default=None). {"layer_norm", "rms_norm"}. If None, default value defined in config.py will be used.')
55
+ parser.add_argument('-ff', '--ff-layer-type', type=str, default=None, help='Feed forward layer type (default=None). {"mlp", "moe", "gmlp"}. If None, default value defined in config.py will be used.')
56
+ parser.add_argument('-wf', '--ff-widening-factor', type=int, default=None, help='Feed forward layer widening factor for MLP/MoE/gMLP (default=None). If None, default value defined in config.py will be used.')
57
+ parser.add_argument('-nmoe', '--moe-num-experts', type=int, default=None, help='Number of experts for MoE (default=None). If None, default value defined in config.py will be used.')
58
+ parser.add_argument('-kmoe', '--moe-topk', type=int, default=None, help='Top-k for MoE (default=None). If None, default value defined in config.py will be used.')
59
+ parser.add_argument('-act', '--hidden-act', type=str, default=None, help='Hidden activation function (default=None). {"gelu", "silu", "relu", "tanh"}. If None, default value defined in config.py will be used.')
60
+ parser.add_argument('-rt', '--rotary-type', type=str, default=None, help='Rotary embedding type expressed in three letters. e.g. ppl: "pixel" for SCA and latents, "lang" for temporal transformer. If None, use config.')
61
+ parser.add_argument('-rk', '--rope-apply-to-keys', type=str2bool, default=None, help='Apply rope to keys (default=None). If None, use config.')
62
+ parser.add_argument('-rp', '--rope-partial-pe', type=str2bool, default=None, help='Whether to apply RoPE to partial positions (default=None). If None, use config.')
63
+ # Decoder configurations
64
+ parser.add_argument('-dff', '--decoder-ff-layer-type', type=str, default=None, help='Feed forward layer type of decoder (default=None). {"mlp", "moe", "gmlp"}. If None, default value defined in config.py will be used.')
65
+ parser.add_argument('-dwf', '--decoder-ff-widening-factor', type=int, default=None, help='Feed forward layer widening factor for decoder MLP/MoE/gMLP (default=None). If None, default value defined in config.py will be used.')
66
+ # Task and Evaluation configurations
67
+ parser.add_argument('-tk', '--task', type=str, default='mt3_full_plus', help='tokenizer type (default=mt3_full_plus). See config/task.py for more options.')
68
+ parser.add_argument('-epv', '--eval-program-vocab', type=str, default=None, help='evaluation vocabulary (default=None). If None, default vocabulary of the data preset will be used.')
69
+ parser.add_argument('-edv', '--eval-drum-vocab', type=str, default=None, help='evaluation vocabulary for drum (default=None). If None, default vocabulary of the data preset will be used.')
70
+ parser.add_argument('-etk', '--eval-subtask-key', type=str, default='default', help='evaluation subtask key (default=default). See config/task.py for more options.')
71
+ parser.add_argument('-t', '--onset-tolerance', type=float, default=0.05, help='onset tolerance (default=0.05).')
72
+ parser.add_argument('-os', '--test-octave-shift', type=str2bool, default=False, help='test optimal octave shift (default=False). True or False')
73
+ parser.add_argument('-w', '--write-model-output', type=str2bool, default=True, help='write model test output to file (default=True). True or False')
74
+ # Trainer configurations
75
+ parser.add_argument('-pr','--precision', type=str, default="bf16-mixed", help='precision (default="bf16-mixed") {32, 16, bf16, bf16-mixed}')
76
+ parser.add_argument('-st', '--strategy', type=str, default='auto', help='strategy (default=auto). auto or deepspeed or ddp')
77
+ parser.add_argument('-n', '--num-nodes', type=int, default=1, help='number of nodes (default=1)')
78
+ parser.add_argument('-g', '--num-gpus', type=str, default='auto', help='number of gpus (default="auto")')
79
+ parser.add_argument('-wb', '--wandb-mode', type=str, default="disabled", help='wandb mode for logging (default=None). "disabled" or "online" or "offline". If None, default value defined in config.py will be used.')
80
+ # Debug
81
+ parser.add_argument('-debug', '--debug-mode', type=str2bool, default=False, help='debug mode (default=False). True or False')
82
+ parser.add_argument('-tps', '--test-pitch-shift', type=int, default=None, help='use pitch shift when testing. debug-purpose only. (default=None). semitone in int.')
83
+ args = parser.parse_args(args)
84
+ # yapf: enable
85
+ if torch.__version__ >= "1.13":
86
+ torch.set_float32_matmul_precision("high")
87
+ args.epochs = None
88
+
89
+ # Initialize and update config
90
+ _, _, dir_info, shared_cfg = initialize_trainer(args, stage='test')
91
+ shared_cfg, audio_cfg, model_cfg = update_config(args, shared_cfg, stage='test')
92
+
93
+ if args.eval_drum_vocab is not None: # override eval_drum_vocab
94
+ eval_drum_vocab = drum_vocab_presets[args.eval_drum_vocab]
95
+
96
+ # Initialize task manager
97
+ tm = TaskManager(task_name=args.task,
98
+ max_shift_steps=int(shared_cfg["TOKENIZER"]["max_shift_steps"]),
99
+ debug_mode=args.debug_mode)
100
+ print(f"Task: {tm.task_name}, Max Shift Steps: {tm.max_shift_steps}")
101
+
102
+ # Use GPU if available
103
+ device = "cpu" # torch.device("cuda" if torch.cuda.is_available() else "cpu")
104
+
105
+ # Model
106
+ model = YourMT3(
107
+ audio_cfg=audio_cfg,
108
+ model_cfg=model_cfg,
109
+ shared_cfg=shared_cfg,
110
+ optimizer=None,
111
+ task_manager=tm, # tokenizer is a member of task_manager
112
+ eval_subtask_key=args.eval_subtask_key,
113
+ write_output_dir=dir_info["lightning_dir"] if args.write_model_output or args.test_octave_shift else None
114
+ ).to(device)
115
+ checkpoint = torch.load(dir_info["last_ckpt_path"])
116
+ state_dict = checkpoint['state_dict']
117
+ new_state_dict = {k: v for k, v in state_dict.items() if 'pitchshift' not in k}
118
+ model.load_state_dict(new_state_dict, strict=False)
119
+ return model.eval()
120
+
121
+
122
+ def transcribe(model, audio_info):
123
+ t = Timer()
124
+
125
+ # Converting Audio
126
+ t.start()
127
+ audio, sr = torchaudio.load(uri=audio_info['filepath'])
128
+ audio = torch.mean(audio, dim=0).unsqueeze(0)
129
+ audio = torchaudio.functional.resample(audio, sr, model.audio_cfg['sample_rate'])
130
+ audio_segments = slice_padded_array(audio, model.audio_cfg['input_frames'], model.audio_cfg['input_frames'])
131
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
132
+ audio_segments = torch.from_numpy(audio_segments.astype('float32')).to(device).unsqueeze(1) # (n_seg, 1, seg_sz)
133
+ t.stop(); t.print_elapsed_time("converting audio")
134
+
135
+ # Inference
136
+ t.start()
137
+ pred_token_arr, _ = model.inference_file(bsz=8, audio_segments=audio_segments)
138
+ t.stop(); t.print_elapsed_time("model inference")
139
+
140
+ # Post-processing
141
+ t.start()
142
+ num_channels = model.task_manager.num_decoding_channels
143
+ n_items = audio_segments.shape[0]
144
+ start_secs_file = [model.audio_cfg['input_frames'] * i / model.audio_cfg['sample_rate'] for i in range(n_items)]
145
+ pred_notes_in_file = []
146
+ n_err_cnt = Counter()
147
+ for ch in range(num_channels):
148
+ pred_token_arr_ch = [arr[:, ch, :] for arr in pred_token_arr] # (B, L)
149
+ zipped_note_events_and_tie, list_events, ne_err_cnt = model.task_manager.detokenize_list_batches(
150
+ pred_token_arr_ch, start_secs_file, return_events=True)
151
+ pred_notes_ch, n_err_cnt_ch = merge_zipped_note_events_and_ties_to_notes(zipped_note_events_and_tie)
152
+ pred_notes_in_file.append(pred_notes_ch)
153
+ n_err_cnt += n_err_cnt_ch
154
+ pred_notes = mix_notes(pred_notes_in_file) # This is the mixed notes from all channels
155
+
156
+ # Write MIDI
157
+ write_model_output_as_midi(pred_notes, 'content/',
158
+ audio_info['track_name'], model.midi_output_inverse_vocab)
159
+ t.stop(); t.print_elapsed_time("post processing")
160
+ midifile = os.path.join('content/model_output/', audio_info['track_name'] + '.mid')
161
+ assert os.path.exists(midifile)
162
+ return midifile
163
+
164
+
165
+ def prepare_media(source_path_or_url: os.PathLike,
166
+ source_type: Literal['audio_filepath', 'youtube_url'],
167
+ delete_video: bool = True) -> Dict:
168
+ """prepare media from source path or youtube, and return audio info"""
169
+ # Get audio_file
170
+ if source_type == 'audio_filepath':
171
+ audio_file = source_path_or_url
172
+ else:
173
+ raise ValueError(source_type)
174
+
175
+ # Create info
176
+ info = torchaudio.info(audio_file)
177
+ return {
178
+ "filepath": audio_file,
179
+ "track_name": os.path.basename(audio_file).split('.')[0],
180
+ "sample_rate": int(info.sample_rate),
181
+ "bits_per_sample": int(info.bits_per_sample),
182
+ "num_channels": int(info.num_channels),
183
+ "num_frames": int(info.num_frames),
184
+ "duration": int(info.num_frames / info.sample_rate),
185
+ "encoding": str.lower(info.encoding),
186
+ }
187
+
188
+ def process_audio(audio_filepath, model):
189
+ if audio_filepath is None:
190
+ return None
191
+ audio_info = prepare_media(audio_filepath, source_type='audio_filepath')
192
+ midifile = transcribe(model, audio_info)
193
+ return midifile
194
+
195
+ if __name__ == "__main__":
196
+ # @title Load Checkpoint
197
+ model_name = 'YPTF+Single (noPS)' # @param ["YMT3+", "YPTF+Single (noPS)", "YPTF+Multi (PS)", "YPTF.MoE+Multi (noPS)", "YPTF.MoE+Multi (PS)"]
198
+ precision = '16' # @param ["32", "bf16-mixed", "16"]
199
+ project = '2024'
200
+
201
+ if model_name == "YMT3+":
202
+ checkpoint = "[email protected]"
203
+ args = [checkpoint, '-p', project, '-pr', precision]
204
+ elif model_name == "YPTF+Single (noPS)":
205
+ checkpoint = "ptf_all_cross_rebal5_mirst_xk2_edr005_attend_c_full_plus_b100@model.ckpt"
206
+ args = [checkpoint, '-p', project, '-enc', 'perceiver-tf', '-ac', 'spec',
207
+ '-hop', '300', '-atc', '1', '-pr', precision]
208
+ elif model_name == "YPTF+Multi (PS)":
209
+ checkpoint = "mc13_256_all_cross_v6_xk5_amp0811_edr005_attend_c_full_plus_2psn_nl26_sb_b26r_800k@model.ckpt"
210
+ args = [checkpoint, '-p', project, '-tk', 'mc13_full_plus_256',
211
+ '-dec', 'multi-t5', '-nl', '26', '-enc', 'perceiver-tf',
212
+ '-ac', 'spec', '-hop', '300', '-atc', '1', '-pr', precision]
213
+ elif model_name == "YPTF.MoE+Multi (noPS)":
214
+ checkpoint = "mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops@last.ckpt"
215
+ args = [checkpoint, '-p', project, '-tk', 'mc13_full_plus_256', '-dec', 'multi-t5',
216
+ '-nl', '26', '-enc', 'perceiver-tf', '-sqr', '1', '-ff', 'moe',
217
+ '-wf', '4', '-nmoe', '8', '-kmoe', '2', '-act', 'silu', '-epe', 'rope',
218
+ '-rp', '1', '-ac', 'spec', '-hop', '300', '-atc', '1', '-pr', precision]
219
+ elif model_name == "YPTF.MoE+Multi (PS)":
220
+ checkpoint = "mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b80_ps2@model.ckpt"
221
+ args = [checkpoint, '-p', project, '-tk', 'mc13_full_plus_256', '-dec', 'multi-t5',
222
+ '-nl', '26', '-enc', 'perceiver-tf', '-sqr', '1', '-ff', 'moe',
223
+ '-wf', '4', '-nmoe', '8', '-kmoe', '2', '-act', 'silu', '-epe', 'rope',
224
+ '-rp', '1', '-ac', 'spec', '-hop', '300', '-atc', '1', '-pr', precision]
225
+ else:
226
+ raise ValueError(model_name)
227
+
228
+ model = load_model_checkpoint(args=args)
229
+
230
+ TEST_AUDIO_FILEPATH = "/home/richhiey/Desktop/code/test_data/test.mp3"
231
+ midifile = process_audio(TEST_AUDIO_FILEPATH, model)
232
+ print(midifile)
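`transcribe()` leans on `slice_padded_array` to cut the mono waveform into equal-length, zero-padded segments before batched inference. Below is a self-contained sketch of that kind of slicing (an illustrative equivalent, not the repo's exact implementation; the 2-second segment length at 16 kHz is an assumption):

```python
# Sketch: fixed-length, zero-padded slicing of a (1, n_samples) waveform,
# illustrating what `slice_padded_array(audio, input_frames, input_frames)`
# is used for above. Illustrative only, not the repo's implementation.
import numpy as np

def slice_padded(audio: np.ndarray, seg_len: int, hop: int) -> np.ndarray:
    x = np.asarray(audio).reshape(-1)  # mono samples
    if len(x) <= seg_len:
        n_seg = 1
    else:
        n_seg = int(np.ceil((len(x) - seg_len) / hop)) + 1
    out = np.zeros((n_seg, seg_len), dtype=x.dtype)
    for i in range(n_seg):
        chunk = x[i * hop : i * hop + seg_len]
        out[i, : len(chunk)] = chunk  # last segment is zero-padded
    return out  # (n_seg, seg_len)

segments = slice_padded(np.random.randn(1, 16000 * 7), seg_len=32000, hop=32000)
print(segments.shape)  # (4, 32000)
```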
install_dataset.py ADDED
@@ -0,0 +1,285 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ install_dataset.py """
11
+ import os
12
+ import argparse
13
+ import mirdata
14
+ from typing import Optional, Tuple, Union
15
+ from utils.preprocess.generate_dataset_stats import generate_dataset_stats_for_all_datasets, update_dataset_stats_for_new_dataset
16
+ from utils.mirdata_dev.datasets import slakh16k
17
+ from utils.preprocess.preprocess_slakh import preprocess_slakh16k, add_program_and_is_drum_info_to_file_list
18
+ from utils.preprocess.preprocess_musicnet import preprocess_musicnet16k
19
+ from utils.preprocess.preprocess_maps import preprocess_maps16k
20
+ from utils.preprocess.preprocess_maestro import preprocess_maestro16k
21
+ from utils.preprocess.preprocess_guitarset import preprocess_guitarset16k, create_filelist_by_style_guitarset16k
22
+ from utils.preprocess.preprocess_enstdrums import preprocess_enstdrums16k, create_filelist_dtm_random_enstdrums16k
23
+ from utils.preprocess.preprocess_mir_st500 import preprocess_mir_st500_16k
24
+ from utils.preprocess.preprocess_cmedia import preprocess_cmedia_16k
25
+ from utils.preprocess.preprocess_rwc_pop_full import preprocess_rwc_pop_full16k
26
+ from utils.preprocess.preprocess_rwc_pop import preprocess_rwc_pop16k
27
+ from utils.preprocess.preprocess_egmd import preprocess_egmd16k
28
+ from utils.preprocess.preprocess_mir1k import preprocess_mir1k_16k
29
+ from utils.preprocess.preprocess_urmp import preprocess_urmp16k
30
+ from utils.preprocess.preprocess_idmt_smt_bass import preprocess_idmt_smt_bass_16k
31
+ from utils.preprocess.preprocess_geerdes import preprocess_geerdes16k
32
+ from utils.utils import download_and_extract #, download_and_extract_zenodo_restricted
33
+
34
+ # zenodo_token = "eyJhbGciOiJIUzUxMiIsImlhdCI6MTcxMDE1MDYzNywiZXhwIjoxNzEyNzA3MTk5fQ.eyJpZCI6ImRmODA5NzZlLTBjM2QtNDk5NS05YjM0LWFiNGM4NzJhMmZhMSIsImRhdGEiOnt9LCJyYW5kb20iOiIwMzY5ZDcxZjc2NTMyN2UyYmVmN2ExYjJkMmMyYTRhNSJ9.0aHnNC-7ivWQO6l8twjLR0NDH4boC0uOolAAmogVt7XRi2PHU5MEKBQoK7-wgDdnmWEIqEIvoLO6p8KTnsY9dg"
35
+
36
+
37
+ def install_slakh(data_home=os.PathLike, no_down=False) -> None:
38
+ if not no_down:
39
+ ds = slakh16k.Dataset(data_home, version='2100-yourmt3-16k')
40
+ ds.download(partial_download=['2100-yourmt3-16k', 'index'])
41
+ del ds
42
+ preprocess_slakh16k(data_home, delete_source_files=False, fix_bass_octave=True)
43
+ add_program_and_is_drum_info_to_file_list(data_home)
44
+
45
+
46
+ def install_musicnet(data_home: os.PathLike, no_down=False) -> None:
47
+ if not no_down:
48
+ url = "https://zenodo.org/record/7811639/files/musicnet_yourmt3_16k.tar.gz?download=1"
49
+ checksum = "a2da7c169e26d452a4e8b9bef498b3d7"
50
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
51
+ preprocess_musicnet16k(data_home, dataset_name='musicnet')
52
+
53
+
54
+ def install_maps(data_home: os.PathLike, no_down=False, sanity_check=False) -> None:
55
+ if not no_down:
56
+ url = "https://zenodo.org/record/7812075/files/maps_yourmt3_16k.tar.gz?download=1"
57
+ checksum = "6b070d162c931cd5e69c16ef2398a649"
58
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
59
+ preprocess_maps16k(data_home, dataset_name='maps', ignore_pedal=False, sanity_check=sanity_check)
60
+
61
+
62
+ def install_maestro(data_home: os.PathLike, no_down=False, sanity_check=False) -> None:
63
+ if not no_down:
64
+ url = "https://zenodo.org/record/7852176/files/maestro_yourmt3_16k.tar.gz?download=1"
65
+ checksum = "c17c6a188d936e5ff3870ef27144d397"
66
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
67
+ preprocess_maestro16k(data_home, dataset_name='maestro', ignore_pedal=False, sanity_check=sanity_check)
68
+
69
+
70
+ def install_guitarset(data_home: os.PathLike, no_down=False) -> None:
71
+ if not no_down:
72
+ url = "https://zenodo.org/record/7831843/files/guitarset_yourmt3_16k.tar.gz?download=1"
73
+ checksum = "e3cfe0cc9394d91d9c290ce888821360"
74
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
75
+ preprocess_guitarset16k(data_home, dataset_name='guitarset')
76
+ create_filelist_by_style_guitarset16k(data_home, dataset_name='guitarset')
77
+
78
+
79
+ def install_enstdrums(data_home, no_down=False) -> None:
80
+ if not no_down:
81
+ url = "https://zenodo.org/record/7831843/files/enstdrums_yourmt3_16k.tar.gz?download=1"
82
+ checksum = "7e28c2a923e4f4162b3d83877cedb5eb"
83
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
84
+ preprocess_enstdrums16k(data_home, dataset_name='enstdrums')
85
+ create_filelist_dtm_random_enstdrums16k(data_home, dataset_name='enstdrums')
86
+
87
+
88
+ def install_egmd(data_home, no_down=False) -> None:
89
+ if not no_down:
90
+ url = "https://zenodo.org/record/7831072/files/egmc_yourmt3_16k.tar.gz?download=1"
91
+ checksum = "4f615157ea4c52a64c6c9dcf68bf2bde"
92
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
93
+ preprocess_egmd16k(data_home, dataset_name='egmd')
94
+
95
+
96
+ def install_mirst500(data_home, zenodo_token, no_down=False, sanity_check=True, apply_correction=False) -> None:
97
+ """ Update Oct 2023: MIR-ST500 with FULL audio files"""
98
+ if not no_down:
99
+ url = "https://zenodo.org/records/10016397/files/mir_st500_yourmt3_16k.tar.gz?download=1"
100
+ checksum = "98eb52eb2456ce4034e21750f309da13"
101
+ download_and_extract(data_home, url, check_sum=checksum, zenodo_token=zenodo_token)
102
+ preprocess_mir_st500_16k(data_home, dataset_name='mir_st500', sanity_check=sanity_check)
103
+
104
+
105
+ def install_cmedia(data_home, zenodo_token, no_down=False, sanity_check=True) -> None:
106
+ if not no_down:
107
+ url = "https://zenodo.org/records/10016397/files/cmedia_yourmt3_16k.tar.gz?download=1"
108
+ checksum = "e6cca23577ba7588e9ed9711a398f7cf"
109
+ download_and_extract(data_home, url, check_sum=checksum, zenodo_token=zenodo_token)
110
+ preprocess_cmedia_16k(data_home, dataset_name='cmedia', sanity_check=sanity_check, apply_correction=True)
111
+
112
+
113
+ def install_rwc_pop(data_home, zenodo_token, no_down=False) -> None:
114
+ if not no_down:
115
+ url = "https://zenodo.org/records/10016397/files/rwc_pop_yourmt3_16k.tar.gz?download=1"
116
+ checksum = "ad459f9fa1b6b87676b2fb37c0ba5dfc"
117
+ download_and_extract(data_home, url, check_sum=checksum, zenodo_token=zenodo_token)
118
+ preprocess_rwc_pop16k(data_home, dataset_name='rwc_pop') # bass transcriptions
119
+ preprocess_rwc_pop_full16k(data_home, dataset_name='rwc_pop') # full transcriptions
120
+
121
+
122
+ def install_mir1k(data_home, no_down=False) -> None:
123
+ if not no_down:
124
+ url = "https://zenodo.org/record/7955481/files/mir1k_yourmt3_16k.tar.gz?download=1"
125
+ checksum = "4cbac56a4e971432ca807efd5cb76d67"
126
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
127
+ # preprocess_mir1k_16k(data_home, dataset_name='mir1k')
128
+
129
+
130
+ def install_urmp(data_home, no_down=False) -> None:
131
+ if not no_down:
132
+ url = "https://zenodo.org/record/8021437/files/urmp_yourmt3_16k.tar.gz?download=1"
133
+ checksum = "4f539c71678a77ba34f6dfca41072102"
134
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
135
+ preprocess_urmp16k(data_home, dataset_name='urmp')
136
+
137
+
138
+ def install_idmt_smt_bass(data_home, no_down=False) -> None:
139
+ if not no_down:
140
+ url = "https://zenodo.org/records/10009959/files/idmt_smt_bass_yourmt3_16k.tar.gz?download=1"
141
+ checksum = "0c95f91926a1e95b1f5d075c05b7eb76"
142
+ download_and_extract(data_home, url, remove_tar_file=True, check_sum=checksum)
143
+ preprocess_idmt_smt_bass_16k(data_home, dataset_name='idmt_smt_bass', sanity_check=True,
144
+ edit_audio=False) # the downloaded audio has already been edited
145
+
146
+
147
+ def install_random_nsynth(data_home, no_down=False) -> None:
148
+ return
149
+
150
+
151
+ def install_geerdes(data_home) -> None:
152
+ try:
153
+ preprocess_geerdes16k(data_home, dataset_name='geerdes', sanity_check=False)
154
+ except Exception as e:
155
+ print(e)
156
+ print("Geerdes dataset is not available for download. Please contact the dataset provider.")
157
+
158
+
159
+ def regenerate_dataset_stats(data_home) -> None:
160
+ generate_dataset_stats_for_all_datasets(data_home)
161
+
162
+
163
+ def get_cached_zenodo_token() -> str:
164
+ # check if cached token exists
165
+ if not os.path.exists('.cached_zenodo_token'):
166
+ raise Exception("Cached Zenodo token not found. Please enter your Zenodo token.")
167
+ # read cached token
168
+ with open('.cached_zenodo_token', 'r') as f:
169
+ zenodo_token = f.read().strip()
170
+ print(f"Using cached Zenodo token: {zenodo_token}")
171
+ return zenodo_token
172
+
173
+
174
+ def cache_zenodo_token(zenodo_token: str) -> None:
175
+ with open('.cached_zenodo_token', 'w') as f:
176
+ f.write(zenodo_token)
177
+ print("Your Zenodo token is cached.")
178
+
179
+
180
+ def option_prompt(data_home: os.PathLike, no_download: bool = False) -> None:
181
+ print("Select the dataset(s) to install (enter comma-separated numbers):")
182
+ print("1. Slakh")
183
+ print("2. MusicNet")
184
+ print("3. MAPS")
185
+ print("4. Maestro")
186
+ print("5. GuitarSet")
187
+ print("6. ENST-drums")
188
+ print("7. EGMD")
189
+ print("8. MIR-ST500 ** Restricted Access **")
190
+ print("9. CMedia ** Restricted Access **")
191
+ print("10. RWC-Pop (Bass and Full) ** Restricted Access **")
192
+ print("11. MIR-1K (NOT SUPPORTED)")
193
+ print("12. URMP")
194
+ print("13. IDMT-SMT-Bass")
195
+ print("14. Random-NSynth")
196
+ print("15. Geerdes")
197
+ print("16. Regenerate Dataset Stats (experimental)")
198
+ print("17. Request Token for ** Restricted Access **")
199
+ print("18. Exit")
200
+
201
+ choice = input("Enter your choices (multiple choices with comma): ")
202
+ choices = [c.strip() for c in choice.split(',')]
203
+
204
+ if "18" in choices:
205
+ print("Exiting.")
206
+ else:
207
+ # ask for Zenodo token
208
+ for c in choices:
209
+ if int(c) in [8, 9, 10]:
210
+ if no_download:
211
+ zenodo_token = None
212
+ else:
213
+ zenodo_token = input("Enter Zenodo token, or press enter to use the cached token:")
214
+ if zenodo_token == "":
215
+ zenodo_token = get_cached_zenodo_token()
216
+ else:
217
+ cache_zenodo_token(zenodo_token)
218
+ break
219
+
220
+ if "1" in choices:
221
+ install_slakh(data_home, no_down=no_download)
222
+ if "2" in choices:
223
+ install_musicnet(data_home, no_down=no_download)
224
+ if "3" in choices:
225
+ install_maps(data_home, no_down=no_download)
226
+ if "4" in choices:
227
+ install_maestro(data_home, no_down=no_download)
228
+ if "5" in choices:
229
+ install_guitarset(data_home, no_down=no_download)
230
+ if "6" in choices:
231
+ install_enstdrums(data_home, no_down=no_download)
232
+ if "7" in choices:
233
+ install_egmd(data_home, no_down=no_download)
234
+ if "8" in choices:
235
+ install_mirst500(data_home, zenodo_token, no_down=no_download)
236
+ if "9" in choices:
237
+ install_cmedia(data_home, zenodo_token, no_down=no_download)
238
+ if "10" in choices:
239
+ install_rwc_pop(data_home, zenodo_token, no_down=no_download)
240
+ if "11" in choices:
241
+ install_mir1k(data_home, no_down=no_download)
242
+ if "12" in choices:
243
+ install_urmp(data_home, no_down=no_download)
244
+ if "13" in choices:
245
+ install_idmt_smt_bass(data_home, no_down=no_download)
246
+ if "14" in choices:
247
+ install_random_nsynth(data_home, no_down=no_download)
248
+ if "15" in choices:
249
+ install_geerdes(data_home) # not available for download
250
+ if "16" in choices:
251
+ regenerate_dataset_stats(data_home)
252
+ if "17" in choices:
253
+ print("\nPlease visit https://zenodo.org/records/10016397 to request a Zenodo token.")
254
+ print("Upon submitting your request, you will receive an email with a link labeled 'Access the record'.")
255
+ print("Copy the token that follows 'token=' in that link.")
256
+ if not any(int(c) in range(1, 18) for c in choices):
257
+ print("Invalid choice(s). Please enter valid numbers separated by commas.")
258
+
259
+
260
+ if __name__ == "__main__":
261
+
262
+ parser = argparse.ArgumentParser(description='Dataset installer script.')
263
+ # data home dir
264
+ parser.add_argument(
265
+ 'data_home',
266
+ type=str,
267
+ nargs='?',
268
+ default=None,
269
+ help='Path to data home directory. If None, use the default path defined in src/config/config.py')
270
+ # `no_download` option
271
+ parser.add_argument('--nodown',
272
+ '-nd',
273
+ action='store_true',
274
+ help='Flag to control downloading. If set, no downloading will occur.')
275
+ args = parser.parse_args()
276
+
277
+ if args.data_home is None:
278
+ from config.config import shared_cfg
279
+ data_home = shared_cfg["PATH"]["data_home"]
280
+ else:
281
+ data_home = args.data_home
282
+ os.makedirs(data_home, exist_ok=True)
283
+ no_download = args.nodown
284
+
285
+ option_prompt(data_home, no_download)
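For scripted setups, the installer functions can also be invoked directly, bypassing `option_prompt()`; a minimal sketch, assuming this file is importable as `install_dataset` from the repo root:

```python
# Sketch: non-interactive dataset install, bypassing option_prompt().
# Assumes this script is importable as `install_dataset` from the repo root.
from install_dataset import install_guitarset, install_urmp

data_home = "../data"  # hypothetical path; the default comes from config/config.py
install_guitarset(data_home)           # download + preprocess GuitarSet
install_urmp(data_home, no_down=True)  # preprocess only, skip the download
```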
logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/.DS_Store ADDED
Binary file (6.15 kB).
 
logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/checkpoints/.DS_Store ADDED
Binary file (6.15 kB).
 
logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_all_eval_final.json ADDED
@@ -0,0 +1,444 @@
1
+ [ '-----------------------------------------------------------------',
2
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
3
+ 'Kick Drum': [36, 35],
4
+ 'Snare Drum': [38, 40]},
5
+ 'eval_vocab': [ { 'Bass': array([32, 33, 34, 35, 36, 37, 38, 39]),
6
+ 'Brass': array([56, 57, 58, 59, 60, 61, 62, 63]),
7
+ 'Chromatic Percussion': array([ 8, 9, 10, 11, 12, 13, 14, 15]),
8
+ 'Guitar': array([24, 25, 26, 27, 28, 29, 30, 31]),
9
+ 'Organ': array([16, 17, 18, 19, 20, 21, 22, 23]),
10
+ 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
11
+ 'Pipe': array([72, 73, 74, 75, 76, 77, 78, 79]),
12
+ 'Reed': array([64, 65, 66, 67, 68, 69, 70, 71]),
13
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
14
+ 'Synth Lead': array([80, 81, 82, 83, 84, 85, 86, 87]),
15
+ 'Synth Pad': array([88, 89, 90, 91, 92, 93, 94, 95])}],
16
+ 'presets': ['slakh']},
17
+ [ { 'test/(slakh)frame_f': 0.8487486243247986,
18
+ 'test/(slakh)frame_f_pc': 0.8603377938270569,
19
+ 'test/(slakh)micro_offset_f_Bass': 0.869631290435791,
20
+ 'test/(slakh)micro_offset_f_Brass': 0.6678106784820557,
21
+ 'test/(slakh)micro_offset_f_Chromatic Percussion': 0.3883885443210602,
22
+ 'test/(slakh)micro_offset_f_Guitar': 0.6883301734924316,
23
+ 'test/(slakh)micro_offset_f_Organ': 0.6711145043373108,
24
+ 'test/(slakh)micro_offset_f_Piano': 0.683600664138794,
25
+ 'test/(slakh)micro_offset_f_Pipe': 0.6132691502571106,
26
+ 'test/(slakh)micro_offset_f_Reed': 0.6974674463272095,
27
+ 'test/(slakh)micro_offset_f_Strings': 0.5805016160011292,
28
+ 'test/(slakh)micro_offset_f_Synth Lead': 0.7929936647415161,
29
+ 'test/(slakh)micro_offset_f_Synth Pad': 0.2877211272716522,
30
+ 'test/(slakh)micro_onset_f_Bass': 0.9328573346138,
31
+ 'test/(slakh)micro_onset_f_Brass': 0.7279624342918396,
32
+ 'test/(slakh)micro_onset_f_Chromatic Percussion': 0.7239300012588501,
33
+ 'test/(slakh)micro_onset_f_Guitar': 0.8129678964614868,
34
+ 'test/(slakh)micro_onset_f_Organ': 0.7785788178443909,
35
+ 'test/(slakh)micro_onset_f_Piano': 0.8763775825500488,
36
+ 'test/(slakh)micro_onset_f_Pipe': 0.7128946185112,
37
+ 'test/(slakh)micro_onset_f_Reed': 0.8024615049362183,
38
+ 'test/(slakh)micro_onset_f_Strings': 0.7257015109062195,
39
+ 'test/(slakh)micro_onset_f_Synth Lead': 0.8566879034042358,
40
+ 'test/(slakh)micro_onset_f_Synth Pad': 0.4453693926334381,
41
+ 'test/(slakh)micro_onset_f_drum': 0.9112541079521179,
42
+ 'test/(slakh)multi_f': 0.7397979497909546,
43
+ 'test/(slakh)offset_f': 0.6961227655410767,
44
+ 'test/(slakh)offset_f_Bass': 0.8508948087692261,
45
+ 'test/(slakh)offset_f_Brass': 0.6707911491394043,
46
+ 'test/(slakh)offset_f_Chromatic Percussion': 0.3454752266407013,
47
+ 'test/(slakh)offset_f_Guitar': 0.6841201186180115,
48
+ 'test/(slakh)offset_f_Organ': 0.6124169230461121,
49
+ 'test/(slakh)offset_f_Piano': 0.6597973108291626,
50
+ 'test/(slakh)offset_f_Pipe': 0.5729255676269531,
51
+ 'test/(slakh)offset_f_Reed': 0.6875563263893127,
52
+ 'test/(slakh)offset_f_Strings': 0.5812338590621948,
53
+ 'test/(slakh)offset_f_Synth Lead': 0.7286868095397949,
54
+ 'test/(slakh)offset_f_Synth Pad': 0.27617573738098145,
55
+ 'test/(slakh)onset_f': 0.8419071435928345,
56
+ 'test/(slakh)onset_f_Bass': 0.9303264617919922,
57
+ 'test/(slakh)onset_f_Brass': 0.7301008701324463,
58
+ 'test/(slakh)onset_f_Chromatic Percussion': 0.6354409456253052,
59
+ 'test/(slakh)onset_f_Guitar': 0.8215969800949097,
60
+ 'test/(slakh)onset_f_Organ': 0.740757942199707,
61
+ 'test/(slakh)onset_f_Piano': 0.8769294619560242,
62
+ 'test/(slakh)onset_f_Pipe': 0.6801912784576416,
63
+ 'test/(slakh)onset_f_Reed': 0.7921974658966064,
64
+ 'test/(slakh)onset_f_Strings': 0.7303540110588074,
65
+ 'test/(slakh)onset_f_Synth Lead': 0.8201698064804077,
66
+ 'test/(slakh)onset_f_Synth Pad': 0.4131230413913727,
67
+ 'test/(slakh)onset_f_drum': 0.9040946960449219}],
68
+ '-----------------------------------------------------------------',
69
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
70
+ 'Kick Drum': [36, 35],
71
+ 'Snare Drum': [38, 40]},
72
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
73
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
74
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
75
+ 'presets': ['musicnet_thickstun']},
76
+ [ { 'test/(musicnet)frame_f': 0.6431668400764465,
77
+ 'test/(musicnet)frame_f_pc': 0.6712867617607117,
78
+ 'test/(musicnet)micro_offset_f_Piano': 0.462179034948349,
79
+ 'test/(musicnet)micro_offset_f_Strings': 0.2768903076648712,
80
+ 'test/(musicnet)micro_offset_f_Winds': 0.29622459411621094,
81
+ 'test/(musicnet)micro_onset_f_Piano': 0.8591256141662598,
82
+ 'test/(musicnet)micro_onset_f_Strings': 0.43876463174819946,
83
+ 'test/(musicnet)micro_onset_f_Winds': 0.5208131670951843,
84
+ 'test/(musicnet)micro_onset_f_drum': nan,
85
+ 'test/(musicnet)multi_f': 0.34509798884391785,
86
+ 'test/(musicnet)offset_f': 0.3474598824977875,
87
+ 'test/(musicnet)offset_f_Piano': 0.462179034948349,
88
+ 'test/(musicnet)offset_f_Strings': 0.2768903076648712,
89
+ 'test/(musicnet)offset_f_Winds': 0.29622459411621094,
90
+ 'test/(musicnet)onset_f': 0.6119987964630127,
91
+ 'test/(musicnet)onset_f_Piano': 0.8591256141662598,
92
+ 'test/(musicnet)onset_f_Strings': 0.43876463174819946,
93
+ 'test/(musicnet)onset_f_Winds': 0.5208131670951843,
94
+ 'test/(musicnet)onset_f_drum': nan}],
95
+ '-----------------------------------------------------------------',
96
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
97
+ 'Kick Drum': [36, 35],
98
+ 'Snare Drum': [38, 40]},
99
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
100
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
101
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
102
+ 'presets': ['musicnet_thickstun_em']},
103
+ [ { 'test/(musicnet)frame_f': 0.7289137840270996,
104
+ 'test/(musicnet)frame_f_pc': 0.7467241287231445,
105
+ 'test/(musicnet)micro_offset_f_Piano': 0.8453038334846497,
106
+ 'test/(musicnet)micro_offset_f_Strings': 0.5030071139335632,
107
+ 'test/(musicnet)micro_offset_f_Winds': 0.5958272814750671,
108
+ 'test/(musicnet)micro_onset_f_Piano': 0.9820442199707031,
109
+ 'test/(musicnet)micro_onset_f_Strings': 0.696555495262146,
110
+ 'test/(musicnet)micro_onset_f_Winds': 0.7763221859931946,
111
+ 'test/(musicnet)micro_onset_f_drum': nan,
112
+ 'test/(musicnet)multi_f': 0.64804607629776,
113
+ 'test/(musicnet)offset_f': 0.647705078125,
114
+ 'test/(musicnet)offset_f_Piano': 0.8453038930892944,
115
+ 'test/(musicnet)offset_f_Strings': 0.5030071139335632,
116
+ 'test/(musicnet)offset_f_Winds': 0.5958272814750671,
117
+ 'test/(musicnet)onset_f': 0.8195822834968567,
118
+ 'test/(musicnet)onset_f_Piano': 0.9820442199707031,
119
+ 'test/(musicnet)onset_f_Strings': 0.696555495262146,
120
+ 'test/(musicnet)onset_f_Winds': 0.7763221859931946,
121
+ 'test/(musicnet)onset_f_drum': nan}],
122
+ '-----------------------------------------------------------------',
123
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
124
+ 'Kick Drum': [36, 35],
125
+ 'Snare Drum': [38, 40]},
126
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
127
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
128
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
129
+ 'presets': ['musicnet_thickstun_ext']},
130
+ [ { 'test/(musicnet)frame_f': 0.7118843793869019,
131
+ 'test/(musicnet)frame_f_pc': 0.7285126447677612,
132
+ 'test/(musicnet)micro_offset_f_Piano': 0.3649955093860626,
133
+ 'test/(musicnet)micro_offset_f_Strings': 0.3690282702445984,
134
+ 'test/(musicnet)micro_offset_f_Winds': 0.32839900255203247,
135
+ 'test/(musicnet)micro_onset_f_Piano': 0.6793581247329712,
136
+ 'test/(musicnet)micro_onset_f_Strings': 0.5712332129478455,
137
+ 'test/(musicnet)micro_onset_f_Winds': 0.5383739471435547,
138
+ 'test/(musicnet)micro_onset_f_drum': nan,
139
+ 'test/(musicnet)multi_f': 0.39981773495674133,
140
+ 'test/(musicnet)offset_f': 0.4001583456993103,
141
+ 'test/(musicnet)offset_f_Piano': 0.38060951232910156,
142
+ 'test/(musicnet)offset_f_Strings': 0.447208970785141,
143
+ 'test/(musicnet)offset_f_Winds': 0.3266069293022156,
144
+ 'test/(musicnet)onset_f': 0.6380151510238647,
145
+ 'test/(musicnet)onset_f_Piano': 0.7089095115661621,
146
+ 'test/(musicnet)onset_f_Strings': 0.6469746828079224,
147
+ 'test/(musicnet)onset_f_Winds': 0.5373958349227905,
148
+ 'test/(musicnet)onset_f_drum': nan}],
149
+ '-----------------------------------------------------------------',
150
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
151
+ 'Kick Drum': [36, 35],
152
+ 'Snare Drum': [38, 40]},
153
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
154
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
155
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
156
+ 'presets': ['musicnet_thickstun_ext_em']},
157
+ [ { 'test/(musicnet)frame_f': 0.8102308511734009,
158
+ 'test/(musicnet)frame_f_pc': 0.8194969296455383,
159
+ 'test/(musicnet)micro_offset_f_Piano': 0.7447232007980347,
160
+ 'test/(musicnet)micro_offset_f_Strings': 0.7160074710845947,
161
+ 'test/(musicnet)micro_offset_f_Winds': 0.6263686418533325,
162
+ 'test/(musicnet)micro_onset_f_Piano': 0.9545997381210327,
163
+ 'test/(musicnet)micro_onset_f_Strings': 0.8443388342857361,
164
+ 'test/(musicnet)micro_onset_f_Winds': 0.7937955856323242,
165
+ 'test/(musicnet)micro_onset_f_drum': nan,
166
+ 'test/(musicnet)multi_f': 0.741904616355896,
167
+ 'test/(musicnet)offset_f': 0.7412658929824829,
168
+ 'test/(musicnet)offset_f_Piano': 0.7575887441635132,
169
+ 'test/(musicnet)offset_f_Strings': 0.7756823897361755,
170
+ 'test/(musicnet)offset_f_Winds': 0.6246463060379028,
171
+ 'test/(musicnet)onset_f': 0.8913213014602661,
172
+ 'test/(musicnet)onset_f_Piano': 0.9567371606826782,
173
+ 'test/(musicnet)onset_f_Strings': 0.8836190104484558,
174
+ 'test/(musicnet)onset_f_Winds': 0.7928102612495422,
175
+ 'test/(musicnet)onset_f_drum': nan}],
176
+ '-----------------------------------------------------------------',
177
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
178
+ 'Kick Drum': [36, 35],
179
+ 'Snare Drum': [38, 40]},
180
+ 'eval_vocab': [{'Singing Voice': [100, 101]}],
181
+ 'presets': ['mir_st500_voc']},
182
+ [ { 'test/(mir_st500)frame_f': 0.8046330809593201,
183
+ 'test/(mir_st500)frame_f_pc': 0.8059064745903015,
184
+ 'test/(mir_st500)melody_oa_Singing Voice': 0.7024147510528564,
185
+ 'test/(mir_st500)melody_rca_Singing Voice': 0.7087444067001343,
186
+ 'test/(mir_st500)melody_rpa_Singing Voice': 0.7024147510528564,
187
+ 'test/(mir_st500)micro_offset_f_Singing Voice': 0.5492644309997559,
188
+ 'test/(mir_st500)micro_onset_f_Singing Voice': 0.7167699337005615,
189
+ 'test/(mir_st500)micro_onset_f_drum': nan,
190
+ 'test/(mir_st500)multi_f': 0.5463830828666687,
191
+ 'test/(mir_st500)offset_f': 0.5456082820892334,
192
+ 'test/(mir_st500)offset_f_Singing Voice': 0.5464057922363281,
193
+ 'test/(mir_st500)onset_f': 0.714525580406189,
194
+ 'test/(mir_st500)onset_f_Singing Voice': 0.7155958414077759,
195
+ 'test/(mir_st500)onset_f_drum': nan}],
196
+ '-----------------------------------------------------------------',
197
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
198
+ 'Kick Drum': [36, 35],
199
+ 'Snare Drum': [38, 40]},
200
+ 'eval_vocab': [{'Singing Voice': [100, 101]}],
201
+ 'presets': ['mir_st500']},
202
+ [ { 'test/(mir_st500)frame_f': 0.17174109816551208,
203
+ 'test/(mir_st500)frame_f_pc': 0.17959092557430267,
204
+ 'test/(mir_st500)melody_oa_Singing Voice': 0.6857293844223022,
205
+ 'test/(mir_st500)melody_rca_Singing Voice': 0.6941294074058533,
206
+ 'test/(mir_st500)melody_rpa_Singing Voice': 0.6857293844223022,
207
+ 'test/(mir_st500)micro_offset_f_Singing Voice': 0.5249119997024536,
208
+ 'test/(mir_st500)micro_onset_f_Singing Voice': 0.6945751905441284,
209
+ 'test/(mir_st500)micro_onset_f_drum': nan,
210
+ 'test/(mir_st500)multi_f': 0.394542932510376,
211
+ 'test/(mir_st500)offset_f': 0.10482196509838104,
212
+ 'test/(mir_st500)offset_f_Singing Voice': 0.5240825414657593,
213
+ 'test/(mir_st500)onset_f': 0.14195600152015686,
214
+ 'test/(mir_st500)onset_f_Singing Voice': 0.6942352056503296,
215
+ 'test/(mir_st500)onset_f_drum': nan}],
216
+ '-----------------------------------------------------------------',
217
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
218
+ 'Kick Drum': [36, 35],
219
+ 'Snare Drum': [38, 40]},
220
+ 'eval_vocab': [None],
221
+ 'presets': ['enstdrums_dtp']},
222
+ [ { 'test/(enstdrums)frame_f': nan,
223
+ 'test/(enstdrums)frame_f_pc': nan,
224
+ 'test/(enstdrums)micro_onset_f_drum': 0.8911184072494507,
225
+ 'test/(enstdrums)offset_f': nan,
226
+ 'test/(enstdrums)onset_f': nan,
227
+ 'test/(enstdrums)onset_f_drum': 0.895440399646759}],
228
+ '-----------------------------------------------------------------',
229
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
230
+ 'Kick Drum': [36, 35],
231
+ 'Snare Drum': [38, 40]},
232
+ 'eval_vocab': [None],
233
+ 'presets': ['enstdrums_dtm']},
234
+ [ { 'test/(enstdrums)frame_f': nan,
235
+ 'test/(enstdrums)frame_f_pc': nan,
236
+ 'test/(enstdrums)micro_onset_f_drum': 0.8614727258682251,
237
+ 'test/(enstdrums)offset_f': nan,
238
+ 'test/(enstdrums)onset_f': nan,
239
+ 'test/(enstdrums)onset_f_drum': 0.866191565990448}],
240
+ '-----------------------------------------------------------------',
241
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
242
+ 'Kick Drum': [36, 35],
243
+ 'Snare Drum': [38, 40]},
244
+ 'eval_vocab': [None],
245
+ 'presets': ['guitarset_progression_pshift']},
246
+ [ { 'test/(guitarset)frame_f': 0.8917033672332764,
247
+ 'test/(guitarset)frame_f_pc': 0.8943274617195129,
248
+ 'test/(guitarset)micro_onset_f_drum': nan,
249
+ 'test/(guitarset)offset_f': 0.8057622909545898,
250
+ 'test/(guitarset)onset_f': 0.894720196723938,
251
+ 'test/(guitarset)onset_f_drum': nan}],
252
+ '-----------------------------------------------------------------',
253
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
254
+ 'Kick Drum': [36, 35],
255
+ 'Snare Drum': [38, 40]},
256
+ 'eval_vocab': [{'Bass': array([32, 33, 34, 35, 36, 37, 38, 39])}],
257
+ 'presets': ['rwc_pop_bass']},
258
+ [ { 'test/(rwc_pop)frame_f': 0.11498401314020157,
+ 'test/(rwc_pop)frame_f_pc': 0.22951407730579376,
+ 'test/(rwc_pop)micro_offset_f_Bass': 0.23757217824459076,
+ 'test/(rwc_pop)micro_onset_f_Bass': 0.3047849237918854,
+ 'test/(rwc_pop)micro_onset_f_drum': nan,
+ 'test/(rwc_pop)multi_f': 0.08871123939752579,
+ 'test/(rwc_pop)offset_f': 0.07318931818008423,
+ 'test/(rwc_pop)offset_f_Bass': 0.22453349828720093,
+ 'test/(rwc_pop)onset_f': 0.10135377943515778,
+ 'test/(rwc_pop)onset_f_Bass': 0.2894594371318817,
+ 'test/(rwc_pop)onset_f_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Piano': array([0, 1, 2, 3, 4, 5, 6, 7])}],
+ 'presets': ['maestro']},
+ [ { 'test/(maestro)frame_f': 0.8877186179161072,
+ 'test/(maestro)frame_f_pc': 0.891477108001709,
+ 'test/(maestro)micro_offset_f_Piano': 0.8288719058036804,
+ 'test/(maestro)micro_onset_f_Piano': 0.953188419342041,
+ 'test/(maestro)micro_onset_f_drum': nan,
+ 'test/(maestro)multi_f': 0.8396718502044678,
+ 'test/(maestro)offset_f': 0.8396490216255188,
+ 'test/(maestro)offset_f_Piano': 0.8396718502044678,
+ 'test/(maestro)onset_f': 0.9605904221534729,
+ 'test/(maestro)onset_f_Piano': 0.9606162905693054,
+ 'test/(maestro)onset_f_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Bass': array([32, 33, 34, 35, 36, 37, 38, 39]),
+ 'Brass': array([56, 57, 58, 59, 60, 61, 62, 63]),
+ 'Chromatic Percussion': array([ 8, 9, 10, 11, 12, 13, 14, 15]),
+ 'Guitar': array([24, 25, 26, 27, 28, 29, 30, 31]),
+ 'Organ': array([16, 17, 18, 19, 20, 21, 22, 23]),
+ 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Pipe': array([72, 73, 74, 75, 76, 77, 78, 79]),
+ 'Reed': array([64, 65, 66, 67, 68, 69, 70, 71]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
+ 'Synth Lead': array([80, 81, 82, 83, 84, 85, 86, 87]),
+ 'Synth Pad': array([88, 89, 90, 91, 92, 93, 94, 95])}],
+ 'presets': ['urmp']},
+ [ { 'test/(urmp)frame_f': 0.8529648780822754,
+ 'test/(urmp)frame_f_pc': 0.8660681247711182,
+ 'test/(urmp)micro_offset_f_Bass': nan,
+ 'test/(urmp)micro_offset_f_Brass': 0.6444324851036072,
+ 'test/(urmp)micro_offset_f_Chromatic Percussion': nan,
+ 'test/(urmp)micro_offset_f_Guitar': nan,
+ 'test/(urmp)micro_offset_f_Organ': nan,
+ 'test/(urmp)micro_offset_f_Piano': nan,
+ 'test/(urmp)micro_offset_f_Pipe': nan,
+ 'test/(urmp)micro_offset_f_Reed': 0.024539878591895103,
+ 'test/(urmp)micro_offset_f_Strings': 0.6389819383621216,
+ 'test/(urmp)micro_offset_f_Synth Lead': nan,
+ 'test/(urmp)micro_offset_f_Synth Pad': nan,
+ 'test/(urmp)micro_onset_f_Bass': nan,
+ 'test/(urmp)micro_onset_f_Brass': 0.8284024000167847,
+ 'test/(urmp)micro_onset_f_Chromatic Percussion': nan,
+ 'test/(urmp)micro_onset_f_Guitar': nan,
+ 'test/(urmp)micro_onset_f_Organ': nan,
+ 'test/(urmp)micro_onset_f_Piano': nan,
+ 'test/(urmp)micro_onset_f_Pipe': nan,
+ 'test/(urmp)micro_onset_f_Reed': 0.030674846842885017,
+ 'test/(urmp)micro_onset_f_Strings': 0.7788345813751221,
+ 'test/(urmp)micro_onset_f_Synth Lead': nan,
+ 'test/(urmp)micro_onset_f_Synth Pad': nan,
+ 'test/(urmp)micro_onset_f_drum': nan,
+ 'test/(urmp)multi_f': 0.6515601277351379,
+ 'test/(urmp)offset_f': 0.6573383808135986,
+ 'test/(urmp)offset_f_Bass': nan,
+ 'test/(urmp)offset_f_Brass': 0.6444324851036072,
+ 'test/(urmp)offset_f_Chromatic Percussion': nan,
+ 'test/(urmp)offset_f_Guitar': nan,
+ 'test/(urmp)offset_f_Organ': nan,
+ 'test/(urmp)offset_f_Piano': nan,
+ 'test/(urmp)offset_f_Pipe': nan,
+ 'test/(urmp)offset_f_Reed': 0.025221969932317734,
+ 'test/(urmp)offset_f_Strings': 0.6739915609359741,
+ 'test/(urmp)offset_f_Synth Lead': nan,
+ 'test/(urmp)offset_f_Synth Pad': nan,
+ 'test/(urmp)onset_f': 0.7913210391998291,
+ 'test/(urmp)onset_f_Bass': nan,
+ 'test/(urmp)onset_f_Brass': 0.8284023404121399,
+ 'test/(urmp)onset_f_Chromatic Percussion': nan,
+ 'test/(urmp)onset_f_Guitar': nan,
+ 'test/(urmp)onset_f_Organ': nan,
+ 'test/(urmp)onset_f_Piano': nan,
+ 'test/(urmp)onset_f_Pipe': nan,
+ 'test/(urmp)onset_f_Reed': 0.0308399461209774,
+ 'test/(urmp)onset_f_Strings': 0.8027772903442383,
+ 'test/(urmp)onset_f_Synth Lead': nan,
+ 'test/(urmp)onset_f_Synth Pad': nan,
+ 'test/(urmp)onset_f_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Piano': array([0, 1, 2, 3, 4, 5, 6, 7])}],
+ 'presets': ['maps_default']},
+ [ { 'test/(maps)frame_f': 0.7393348217010498,
+ 'test/(maps)frame_f_pc': 0.7572816610336304,
+ 'test/(maps)micro_offset_f_Piano': 0.5382159948348999,
+ 'test/(maps)micro_onset_f_Piano': 0.8817331194877625,
+ 'test/(maps)micro_onset_f_drum': nan,
+ 'test/(maps)multi_f': 0.5370228290557861,
+ 'test/(maps)offset_f': 0.5364712476730347,
+ 'test/(maps)offset_f_Piano': 0.5370274186134338,
+ 'test/(maps)onset_f': 0.8763453960418701,
+ 'test/(maps)onset_f_Piano': 0.8772046566009521,
+ 'test/(maps)onset_f_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Bass': array([32, 33, 34, 35, 36, 37, 38, 39]),
+ 'Brass': array([56, 57, 58, 59, 60, 61, 62, 63]),
+ 'Chromatic Percussion': array([ 8, 9, 10, 11, 12, 13, 14, 15]),
+ 'Guitar': array([24, 25, 26, 27, 28, 29, 30, 31]),
+ 'Organ': array([16, 17, 18, 19, 20, 21, 22, 23]),
+ 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Pipe': array([72, 73, 74, 75, 76, 77, 78, 79]),
+ 'Reed': array([64, 65, 66, 67, 68, 69, 70, 71]),
+ 'Singing Voice': [100, 101],
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
+ 'Synth Lead': array([80, 81, 82, 83, 84, 85, 86, 87]),
+ 'Synth Pad': array([88, 89, 90, 91, 92, 93, 94, 95])}],
+ 'presets': ['rwc_pop_full']},
+ [ { 'test/(rwc_pop)frame_f': 0.5036451816558838,
+ 'test/(rwc_pop)frame_f_pc': 0.6125867366790771,
+ 'test/(rwc_pop)melody_oa_Singing Voice': 0.11486613005399704,
+ 'test/(rwc_pop)melody_rca_Singing Voice': 0.5412429571151733,
+ 'test/(rwc_pop)melody_rpa_Singing Voice': 0.11486613005399704,
+ 'test/(rwc_pop)micro_offset_f_Bass': 0.12412633746862411,
+ 'test/(rwc_pop)micro_offset_f_Brass': 0.17169594764709473,
+ 'test/(rwc_pop)micro_offset_f_Chromatic Percussion': 0.00454988656565547,
+ 'test/(rwc_pop)micro_offset_f_Guitar': 0.10689836740493774,
+ 'test/(rwc_pop)micro_offset_f_Organ': 0.03003580868244171,
+ 'test/(rwc_pop)micro_offset_f_Piano': 0.26919493079185486,
+ 'test/(rwc_pop)micro_offset_f_Pipe': 0.0404929593205452,
+ 'test/(rwc_pop)micro_offset_f_Reed': 0.0871850997209549,
+ 'test/(rwc_pop)micro_offset_f_Singing Voice': 0.05066283047199249,
+ 'test/(rwc_pop)micro_offset_f_Strings': 0.07972945272922516,
+ 'test/(rwc_pop)micro_offset_f_Synth Lead': 0.00444957846775651,
+ 'test/(rwc_pop)micro_offset_f_Synth Pad': 0.003691470716148615,
+ 'test/(rwc_pop)micro_onset_f_Bass': 0.15706360340118408,
+ 'test/(rwc_pop)micro_onset_f_Brass': 0.21161191165447235,
+ 'test/(rwc_pop)micro_onset_f_Chromatic Percussion': 0.01451630424708128,
+ 'test/(rwc_pop)micro_onset_f_Guitar': 0.22117385268211365,
+ 'test/(rwc_pop)micro_onset_f_Organ': 0.04916893690824509,
+ 'test/(rwc_pop)micro_onset_f_Piano': 0.4931323528289795,
+ 'test/(rwc_pop)micro_onset_f_Pipe': 0.05809858813881874,
+ 'test/(rwc_pop)micro_onset_f_Reed': 0.09726177901029587,
+ 'test/(rwc_pop)micro_onset_f_Singing Voice': 0.08127696067094803,
+ 'test/(rwc_pop)micro_onset_f_Strings': 0.15419140458106995,
+ 'test/(rwc_pop)micro_onset_f_Synth Lead': 0.01814058981835842,
+ 'test/(rwc_pop)micro_onset_f_Synth Pad': 0.01224013976752758,
+ 'test/(rwc_pop)micro_onset_f_drum': 0.6436583399772644,
+ 'test/(rwc_pop)multi_f': 0.25216758251190186,
+ 'test/(rwc_pop)offset_f': 0.1824045181274414,
+ 'test/(rwc_pop)offset_f_Bass': 0.11402810364961624,
+ 'test/(rwc_pop)offset_f_Brass': 0.1329607367515564,
+ 'test/(rwc_pop)offset_f_Chromatic Percussion': 0.005995466839522123,
+ 'test/(rwc_pop)offset_f_Guitar': 0.09556090831756592,
+ 'test/(rwc_pop)offset_f_Organ': 0.028927000239491463,
+ 'test/(rwc_pop)offset_f_Piano': 0.24980537593364716,
+ 'test/(rwc_pop)offset_f_Pipe': 0.06132758408784866,
+ 'test/(rwc_pop)offset_f_Reed': 0.10889723151922226,
+ 'test/(rwc_pop)offset_f_Singing Voice': 0.056261900812387466,
+ 'test/(rwc_pop)offset_f_Strings': 0.09541783481836319,
+ 'test/(rwc_pop)offset_f_Synth Lead': 0.009463030844926834,
+ 'test/(rwc_pop)offset_f_Synth Pad': 0.002910840790718794,
+ 'test/(rwc_pop)onset_f': 0.33215251564979553,
+ 'test/(rwc_pop)onset_f_Bass': 0.14644065499305725,
+ 'test/(rwc_pop)onset_f_Brass': 0.16938887536525726,
+ 'test/(rwc_pop)onset_f_Chromatic Percussion': 0.01952812261879444,
+ 'test/(rwc_pop)onset_f_Guitar': 0.19601118564605713,
+ 'test/(rwc_pop)onset_f_Organ': 0.053188469260931015,
+ 'test/(rwc_pop)onset_f_Piano': 0.4373621344566345,
+ 'test/(rwc_pop)onset_f_Pipe': 0.07636822760105133,
+ 'test/(rwc_pop)onset_f_Reed': 0.126461461186409,
+ 'test/(rwc_pop)onset_f_Singing Voice': 0.0904482901096344,
+ 'test/(rwc_pop)onset_f_Strings': 0.1726456582546234,
+ 'test/(rwc_pop)onset_f_Synth Lead': 0.022806964814662933,
+ 'test/(rwc_pop)onset_f_Synth Pad': 0.009439672343432903,
+ 'test/(rwc_pop)onset_f_drum': 0.6497281193733215}]]
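
Note: these result logs are pretty-printed Python literals rather than strict JSON; they contain bare `nan` and numpy `array(...)` constructors, so `json.load` will reject them. A minimal, hypothetical loading sketch (assuming the files contain only such literals, as above):

import numpy as np

def load_result_log(path):
    # eval the pprint dump in a restricted namespace that supplies the two non-JSON tokens
    with open(path) as f:
        return eval(f.read(), {"__builtins__": {}}, {"array": np.array, "nan": float("nan")})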
logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_mir_st500_voc_debug.json ADDED
@@ -0,0 +1,18 @@
+ [ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': None,
+ 'eval_vocab': [{'Singing Voice': [100, 101]}],
+ 'presets': ['mir_st500_voc_debug']},
+ [ { 'test/(mir_st500)frame_f': 0.8410361409187317,
+ 'test/(mir_st500)frame_f_pc': 0.841644287109375,
+ 'test/(mir_st500)melody_oa_Singing Voice': 0.7632298469543457,
+ 'test/(mir_st500)melody_rca_Singing Voice': 0.765277087688446,
+ 'test/(mir_st500)melody_rpa_Singing Voice': 0.7632298469543457,
+ 'test/(mir_st500)micro_offset_f_Singing Voice': 0.589411199092865,
+ 'test/(mir_st500)micro_onset_f_Singing Voice': 0.7484803795814514,
+ 'test/(mir_st500)micro_onset_f_drum': nan,
+ 'test/(mir_st500)multi_f': 0.586248517036438,
+ 'test/(mir_st500)offset_f': 0.5856963992118835,
+ 'test/(mir_st500)offset_f_Singing Voice': 0.5862772464752197,
+ 'test/(mir_st500)onset_f': 0.7450660467147827,
+ 'test/(mir_st500)onset_f_Singing Voice': 0.745836615562439,
+ 'test/(mir_st500)onset_f_drum': nan}]]
logs/2024/mc13_256_g4_all_v7_mt3f_sqr_rms_moe_wf4_n8k2_silu_rope_rp_b36_nops/result_mc13_full_plus_256_default_rwc_pop_bass.json ADDED
@@ -0,0 +1,20 @@
+ [ '-----------------------------------------------------------------',
+ { 'add_pitch_class_metric': ['Bass'],
+ 'eval_drum_vocab': None,
+ 'eval_vocab': [{'Bass': array([32, 33, 34, 35, 36, 37, 38, 39])}],
+ 'presets': ['rwc_pop_bass']},
+ [ { 'test/(rwc_pop)frame_f': 0.11498401314020157,
+ 'test/(rwc_pop)frame_f_pc': 0.22951407730579376,
+ 'test/(rwc_pop)micro_offset_f_Bass': 0.25098368525505066,
+ 'test/(rwc_pop)micro_offset_f_Bass_pc': 0.38576385378837585,
+ 'test/(rwc_pop)micro_onset_f_Bass': 0.32546746730804443,
+ 'test/(rwc_pop)micro_onset_f_Bass_pc': 0.509304940700531,
+ 'test/(rwc_pop)micro_onset_f_drum': nan,
+ 'test/(rwc_pop)multi_f': 0.1446290910243988,
+ 'test/(rwc_pop)offset_f': 0.07808000594377518,
+ 'test/(rwc_pop)offset_f_Bass': 0.23580260574817657,
+ 'test/(rwc_pop)offset_f_Bass_pc': 0.35772594809532166,
+ 'test/(rwc_pop)onset_f': 0.1098790168762207,
+ 'test/(rwc_pop)onset_f_Bass': 0.30800098180770874,
+ 'test/(rwc_pop)onset_f_Bass_pc': 0.4801085293292999,
+ 'test/(rwc_pop)onset_f_drum': nan}]]
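
The `_pc` entries above are pitch-class variants of the note metrics, enabled here by `add_pitch_class_metric: ['Bass']`. As a sketch of the idea (an illustration, not necessarily the repository's exact metric code), pitches are folded to an octave-invariant class before matching, which is why the Bass scores rise substantially:

def to_pitch_class(midi_pitch: int) -> int:
    # fold a MIDI pitch to its chroma; C=0, C#=1, ..., B=11
    return midi_pitch % 12

assert to_pitch_class(36) == to_pitch_class(48)  # octave-apart C notes now match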
logs/2024/notask_all_cross_v6_xk2_amp0811_gm_ext_plus_nops_b72/result_mt3_full_plus_default_all_eval_final.json ADDED
@@ -0,0 +1,654 @@
+ [ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [None],
+ 'presets': ['slakh']},
+ [ { 'test/(slakh)offset_f': 0.6083431839942932,
+ 'test/(slakh)offset_p': 0.6622373461723328,
+ 'test/(slakh)offset_r': 0.5666455626487732,
+ 'test/(slakh)onset_f': 0.7774602174758911,
+ 'test/(slakh)onset_f_drum': 0.863847553730011,
+ 'test/(slakh)onset_p': 0.847067654132843,
+ 'test/(slakh)onset_p_drum': 0.8507407307624817,
+ 'test/(slakh)onset_r': 0.7236887216567993,
+ 'test/(slakh)onset_r_drum': 0.9046840071678162,
+ 'test/macro_offset_f': 0.6083431839942932,
+ 'test/macro_offset_p': 0.6622373461723328,
+ 'test/macro_offset_r': 0.5666455626487732,
+ 'test/macro_onset_f': 0.7774602174758911,
+ 'test/macro_onset_f_drum': 0.863847553730011,
+ 'test/macro_onset_p': 0.847067654132843,
+ 'test/macro_onset_p_drum': 0.8507407307624817,
+ 'test/macro_onset_r': 0.7236887216567993,
+ 'test/macro_onset_r_drum': 0.9046840071678162}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
+ 'presets': ['musicnet_thickstun']},
+ [ { 'test/(musicnet)offset_f': 0.3357182443141937,
+ 'test/(musicnet)offset_f_Piano': 0.48227936029434204,
+ 'test/(musicnet)offset_f_Strings': 0.25815683603286743,
+ 'test/(musicnet)offset_f_Winds': 0.26120856404304504,
+ 'test/(musicnet)offset_p': 0.34956488013267517,
+ 'test/(musicnet)offset_p_Piano': 0.4812760055065155,
+ 'test/(musicnet)offset_p_Strings': 0.29323798418045044,
+ 'test/(musicnet)offset_p_Winds': 0.25695109367370605,
+ 'test/(musicnet)offset_r': 0.32450374960899353,
+ 'test/(musicnet)offset_r_Piano': 0.48328691720962524,
+ 'test/(musicnet)offset_r_Strings': 0.23057259619235992,
+ 'test/(musicnet)offset_r_Winds': 0.2656095027923584,
+ 'test/(musicnet)onset_f': 0.61080002784729,
+ 'test/(musicnet)onset_f_Piano': 0.8672689199447632,
+ 'test/(musicnet)onset_f_Strings': 0.41614195704460144,
+ 'test/(musicnet)onset_f_Winds': 0.5253411531448364,
+ 'test/(musicnet)onset_f_drum': nan,
+ 'test/(musicnet)onset_p': 0.6341683268547058,
+ 'test/(musicnet)onset_p_Piano': 0.8654646277427673,
+ 'test/(musicnet)onset_p_Strings': 0.4726918041706085,
+ 'test/(musicnet)onset_p_Winds': 0.5167785286903381,
+ 'test/(musicnet)onset_p_drum': nan,
+ 'test/(musicnet)onset_r': 0.5917337536811829,
+ 'test/(musicnet)onset_r_Piano': 0.8690807819366455,
+ 'test/(musicnet)onset_r_Strings': 0.37167689204216003,
+ 'test/(musicnet)onset_r_Winds': 0.5341922640800476,
+ 'test/(musicnet)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.3357182443141937,
+ 'test/macro_offset_f_Piano': 0.48227936029434204,
+ 'test/macro_offset_f_Strings': 0.25815683603286743,
+ 'test/macro_offset_f_Winds': 0.26120856404304504,
+ 'test/macro_offset_p': 0.34956488013267517,
+ 'test/macro_offset_p_Piano': 0.4812760055065155,
+ 'test/macro_offset_p_Strings': 0.29323798418045044,
+ 'test/macro_offset_p_Winds': 0.25695109367370605,
+ 'test/macro_offset_r': 0.32450374960899353,
+ 'test/macro_offset_r_Piano': 0.48328691720962524,
+ 'test/macro_offset_r_Strings': 0.23057259619235992,
+ 'test/macro_offset_r_Winds': 0.2656095027923584,
+ 'test/macro_onset_f': 0.61080002784729,
+ 'test/macro_onset_f_Piano': 0.8672689199447632,
+ 'test/macro_onset_f_Strings': 0.41614195704460144,
+ 'test/macro_onset_f_Winds': 0.5253411531448364,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.6341683268547058,
+ 'test/macro_onset_p_Piano': 0.8654646277427673,
+ 'test/macro_onset_p_Strings': 0.4726918041706085,
+ 'test/macro_onset_p_Winds': 0.5167785286903381,
+ 'test/macro_onset_p_drum': nan,
+ 'test/macro_onset_r': 0.5917337536811829,
+ 'test/macro_onset_r_Piano': 0.8690807819366455,
+ 'test/macro_onset_r_Strings': 0.37167689204216003,
+ 'test/macro_onset_r_Winds': 0.5341922640800476,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
+ 'presets': ['musicnet_thickstun_em']},
+ [ { 'test/(musicnet)offset_f': 0.6265446543693542,
+ 'test/(musicnet)offset_f_Piano': 0.8271092772483826,
+ 'test/(musicnet)offset_f_Strings': 0.5159010887145996,
+ 'test/(musicnet)offset_f_Winds': 0.5344406366348267,
+ 'test/(musicnet)offset_p': 0.6488454937934875,
+ 'test/(musicnet)offset_p_Piano': 0.8294035792350769,
+ 'test/(musicnet)offset_p_Strings': 0.5695708990097046,
+ 'test/(musicnet)offset_p_Winds': 0.5244486927986145,
+ 'test/(musicnet)offset_r': 0.6074720025062561,
+ 'test/(musicnet)offset_r_Piano': 0.8248276114463806,
+ 'test/(musicnet)offset_r_Strings': 0.47147470712661743,
+ 'test/(musicnet)offset_r_Winds': 0.5448207259178162,
+ 'test/(musicnet)onset_f': 0.8307012915611267,
+ 'test/(musicnet)onset_f_Piano': 0.9861687421798706,
+ 'test/(musicnet)onset_f_Strings': 0.6919905543327332,
+ 'test/(musicnet)onset_f_Winds': 0.8070346713066101,
+ 'test/(musicnet)onset_f_drum': nan,
+ 'test/(musicnet)onset_p': 0.861139714717865,
+ 'test/(musicnet)onset_p_Piano': 0.9889042973518372,
+ 'test/(musicnet)onset_p_Strings': 0.7639791965484619,
+ 'test/(musicnet)onset_p_Winds': 0.791946291923523,
+ 'test/(musicnet)onset_p_drum': nan,
+ 'test/(musicnet)onset_r': 0.8046172261238098,
+ 'test/(musicnet)onset_r_Piano': 0.9834482669830322,
+ 'test/(musicnet)onset_r_Strings': 0.6324004530906677,
+ 'test/(musicnet)onset_r_Winds': 0.8227091431617737,
+ 'test/(musicnet)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.6265446543693542,
+ 'test/macro_offset_f_Piano': 0.8271092772483826,
+ 'test/macro_offset_f_Strings': 0.5159010887145996,
+ 'test/macro_offset_f_Winds': 0.5344406366348267,
+ 'test/macro_offset_p': 0.6488454937934875,
+ 'test/macro_offset_p_Piano': 0.8294035792350769,
+ 'test/macro_offset_p_Strings': 0.5695708990097046,
+ 'test/macro_offset_p_Winds': 0.5244486927986145,
+ 'test/macro_offset_r': 0.6074720025062561,
+ 'test/macro_offset_r_Piano': 0.8248276114463806,
+ 'test/macro_offset_r_Strings': 0.47147470712661743,
+ 'test/macro_offset_r_Winds': 0.5448207259178162,
+ 'test/macro_onset_f': 0.8307012915611267,
+ 'test/macro_onset_f_Piano': 0.9861687421798706,
+ 'test/macro_onset_f_Strings': 0.6919905543327332,
+ 'test/macro_onset_f_Winds': 0.8070346713066101,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.861139714717865,
+ 'test/macro_onset_p_Piano': 0.9889042973518372,
+ 'test/macro_onset_p_Strings': 0.7639791965484619,
+ 'test/macro_onset_p_Winds': 0.791946291923523,
+ 'test/macro_onset_p_drum': nan,
+ 'test/macro_onset_r': 0.8046172261238098,
+ 'test/macro_onset_r_Piano': 0.9834482669830322,
+ 'test/macro_onset_r_Strings': 0.6324004530906677,
+ 'test/macro_onset_r_Winds': 0.8227091431617737,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
+ 'presets': ['musicnet_thickstun_ext']},
+ [ { 'test/(musicnet)offset_f': 0.3959674835205078,
+ 'test/(musicnet)offset_f_Piano': 0.3805476725101471,
+ 'test/(musicnet)offset_f_Strings': 0.4404304027557373,
+ 'test/(musicnet)offset_f_Winds': 0.30910158157348633,
+ 'test/(musicnet)offset_p': 0.39991551637649536,
+ 'test/(musicnet)offset_p_Piano': 0.30609330534935,
+ 'test/(musicnet)offset_p_Strings': 0.45054253935813904,
+ 'test/(musicnet)offset_p_Winds': 0.198689267039299,
+ 'test/(musicnet)offset_r': 0.39315736293792725,
+ 'test/(musicnet)offset_r_Piano': 0.3786337375640869,
+ 'test/(musicnet)offset_r_Strings': 0.4319221079349518,
+ 'test/(musicnet)offset_r_Winds': 0.3212359547615051,
+ 'test/(musicnet)onset_f': 0.6437932252883911,
+ 'test/(musicnet)onset_f_Piano': 0.7200534343719482,
+ 'test/(musicnet)onset_f_Strings': 0.6363336443901062,
+ 'test/(musicnet)onset_f_Winds': 0.5482662916183472,
+ 'test/(musicnet)onset_f_drum': nan,
+ 'test/(musicnet)onset_p': 0.6502920985221863,
+ 'test/(musicnet)onset_p_Piano': 0.5792450308799744,
+ 'test/(musicnet)onset_p_Strings': 0.652239203453064,
+ 'test/(musicnet)onset_p_Winds': 0.3531217575073242,
+ 'test/(musicnet)onset_p_drum': nan,
+ 'test/(musicnet)onset_r': 0.6391699910163879,
+ 'test/(musicnet)onset_r_Piano': 0.716358482837677,
+ 'test/(musicnet)onset_r_Strings': 0.6230224967002869,
+ 'test/(musicnet)onset_r_Winds': 0.5685860514640808,
+ 'test/(musicnet)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.3959674835205078,
+ 'test/macro_offset_f_Piano': 0.3805476725101471,
+ 'test/macro_offset_f_Strings': 0.4404304027557373,
+ 'test/macro_offset_f_Winds': 0.30910158157348633,
+ 'test/macro_offset_p': 0.39991551637649536,
+ 'test/macro_offset_p_Piano': 0.30609330534935,
+ 'test/macro_offset_p_Strings': 0.45054253935813904,
+ 'test/macro_offset_p_Winds': 0.198689267039299,
+ 'test/macro_offset_r': 0.39315736293792725,
+ 'test/macro_offset_r_Piano': 0.3786337375640869,
+ 'test/macro_offset_r_Strings': 0.4319221079349518,
+ 'test/macro_offset_r_Winds': 0.3212359547615051,
+ 'test/macro_onset_f': 0.6437932252883911,
+ 'test/macro_onset_f_Piano': 0.7200534343719482,
+ 'test/macro_onset_f_Strings': 0.6363336443901062,
+ 'test/macro_onset_f_Winds': 0.5482662916183472,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.6502920985221863,
+ 'test/macro_onset_p_Piano': 0.5792450308799744,
+ 'test/macro_onset_p_Strings': 0.652239203453064,
+ 'test/macro_onset_p_Winds': 0.3531217575073242,
+ 'test/macro_onset_p_drum': nan,
+ 'test/macro_onset_r': 0.6391699910163879,
+ 'test/macro_onset_r_Piano': 0.716358482837677,
+ 'test/macro_onset_r_Strings': 0.6230224967002869,
+ 'test/macro_onset_r_Winds': 0.5685860514640808,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51]),
+ 'Winds': array([64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79])}],
+ 'presets': ['musicnet_thickstun_ext_em']},
+ [ { 'test/(musicnet)offset_f': 0.735069990158081,
+ 'test/(musicnet)offset_f_Piano': 0.7454416751861572,
+ 'test/(musicnet)offset_f_Strings': 0.7720898389816284,
+ 'test/(musicnet)offset_f_Winds': 0.6024924516677856,
+ 'test/(musicnet)offset_p': 0.7449241280555725,
+ 'test/(musicnet)offset_p_Piano': 0.6016747951507568,
+ 'test/(musicnet)offset_p_Strings': 0.7943185567855835,
+ 'test/(musicnet)offset_p_Winds': 0.38696134090423584,
+ 'test/(musicnet)offset_r': 0.7270892262458801,
+ 'test/(musicnet)offset_r_Piano': 0.7391372919082642,
+ 'test/(musicnet)offset_r_Strings': 0.752295732498169,
+ 'test/(musicnet)offset_r_Winds': 0.6266684532165527,
+ 'test/(musicnet)onset_f': 0.8967338800430298,
+ 'test/(musicnet)onset_f_Piano': 0.9509001970291138,
+ 'test/(musicnet)onset_f_Strings': 0.8831208944320679,
+ 'test/(musicnet)onset_f_Winds': 0.8164795637130737,
+ 'test/(musicnet)onset_f_drum': nan,
+ 'test/(musicnet)onset_p': 0.9097173810005188,
+ 'test/(musicnet)onset_p_Piano': 0.7678321003913879,
+ 'test/(musicnet)onset_p_Strings': 0.9111027717590332,
+ 'test/(musicnet)onset_p_Winds': 0.5252859592437744,
+ 'test/(musicnet)onset_p_drum': nan,
+ 'test/(musicnet)onset_r': 0.8861774206161499,
+ 'test/(musicnet)onset_r_Piano': 0.9424763321876526,
+ 'test/(musicnet)onset_r_Strings': 0.8583273887634277,
+ 'test/(musicnet)onset_r_Winds': 0.8477022647857666,
+ 'test/(musicnet)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.735069990158081,
+ 'test/macro_offset_f_Piano': 0.7454416751861572,
+ 'test/macro_offset_f_Strings': 0.7720898389816284,
+ 'test/macro_offset_f_Winds': 0.6024924516677856,
+ 'test/macro_offset_p': 0.7449241280555725,
+ 'test/macro_offset_p_Piano': 0.6016747951507568,
+ 'test/macro_offset_p_Strings': 0.7943185567855835,
+ 'test/macro_offset_p_Winds': 0.38696134090423584,
+ 'test/macro_offset_r': 0.7270892262458801,
+ 'test/macro_offset_r_Piano': 0.7391372919082642,
+ 'test/macro_offset_r_Strings': 0.752295732498169,
+ 'test/macro_offset_r_Winds': 0.6266684532165527,
+ 'test/macro_onset_f': 0.8967338800430298,
+ 'test/macro_onset_f_Piano': 0.9509001970291138,
+ 'test/macro_onset_f_Strings': 0.8831208944320679,
+ 'test/macro_onset_f_Winds': 0.8164795637130737,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.9097173810005188,
+ 'test/macro_onset_p_Piano': 0.7678321003913879,
+ 'test/macro_onset_p_Strings': 0.9111027717590332,
+ 'test/macro_onset_p_Winds': 0.5252859592437744,
+ 'test/macro_onset_p_drum': nan,
+ 'test/macro_onset_r': 0.8861774206161499,
+ 'test/macro_onset_r_Piano': 0.9424763321876526,
+ 'test/macro_onset_r_Strings': 0.8583273887634277,
+ 'test/macro_onset_r_Winds': 0.8477022647857666,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Singing Voice': [100]}],
+ 'presets': ['mir_st500_voc']},
+ [ { 'test/(mir_st500)offset_f': 0.5048608183860779,
+ 'test/(mir_st500)offset_f_Singing Voice': 0.5061416029930115,
+ 'test/(mir_st500)offset_p': 0.5133110880851746,
+ 'test/(mir_st500)offset_p_Singing Voice': 0.5161048769950867,
+ 'test/(mir_st500)offset_r': 0.4993303716182709,
+ 'test/(mir_st500)offset_r_Singing Voice': 0.49919605255126953,
+ 'test/(mir_st500)onset_f': 0.6959291696548462,
+ 'test/(mir_st500)onset_f_Singing Voice': 0.6976490616798401,
+ 'test/(mir_st500)onset_f_drum': nan,
+ 'test/(mir_st500)onset_p': 0.7078328728675842,
+ 'test/(mir_st500)onset_p_Singing Voice': 0.7117308974266052,
+ 'test/(mir_st500)onset_p_drum': 0.0,
+ 'test/(mir_st500)onset_r': 0.6881252527236938,
+ 'test/(mir_st500)onset_r_Singing Voice': 0.6878247261047363,
+ 'test/(mir_st500)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.5048608183860779,
+ 'test/macro_offset_f_Singing Voice': 0.5061416029930115,
+ 'test/macro_offset_p': 0.5133110880851746,
+ 'test/macro_offset_p_Singing Voice': 0.5161048769950867,
+ 'test/macro_offset_r': 0.4993303716182709,
+ 'test/macro_offset_r_Singing Voice': 0.49919605255126953,
+ 'test/macro_onset_f': 0.6959291696548462,
+ 'test/macro_onset_f_Singing Voice': 0.6976490616798401,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.7078328728675842,
+ 'test/macro_onset_p_Singing Voice': 0.7117308974266052,
+ 'test/macro_onset_p_drum': 0.0,
+ 'test/macro_onset_r': 0.6881252527236938,
+ 'test/macro_onset_r_Singing Voice': 0.6878247261047363,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Singing Voice': [100]}],
+ 'presets': ['mir_st500']},
+ [ { 'test/(mir_st500)offset_f': 0.08752609044313431,
+ 'test/(mir_st500)offset_f_Singing Voice': 0.4340810775756836,
+ 'test/(mir_st500)offset_p': 0.04927415773272514,
+ 'test/(mir_st500)offset_p_Singing Voice': 0.44511955976486206,
+ 'test/(mir_st500)offset_r': 0.4603072702884674,
+ 'test/(mir_st500)offset_r_Singing Voice': 0.427217572927475,
+ 'test/(mir_st500)onset_f': 0.1349075436592102,
+ 'test/(mir_st500)onset_f_Singing Voice': 0.6468554735183716,
+ 'test/(mir_st500)onset_f_drum': nan,
+ 'test/(mir_st500)onset_p': 0.07584631443023682,
+ 'test/(mir_st500)onset_p_Singing Voice': 0.6652325391769409,
+ 'test/(mir_st500)onset_p_drum': 0.0,
+ 'test/(mir_st500)onset_r': 0.7170646786689758,
+ 'test/(mir_st500)onset_r_Singing Voice': 0.6353515386581421,
+ 'test/(mir_st500)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.08752609044313431,
+ 'test/macro_offset_f_Singing Voice': 0.4340810775756836,
+ 'test/macro_offset_p': 0.04927415773272514,
+ 'test/macro_offset_p_Singing Voice': 0.44511955976486206,
+ 'test/macro_offset_r': 0.4603072702884674,
+ 'test/macro_offset_r_Singing Voice': 0.427217572927475,
+ 'test/macro_onset_f': 0.1349075436592102,
+ 'test/macro_onset_f_Singing Voice': 0.6468554735183716,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.07584631443023682,
+ 'test/macro_onset_p_Singing Voice': 0.6652325391769409,
+ 'test/macro_onset_p_drum': 0.0,
+ 'test/macro_onset_r': 0.7170646786689758,
+ 'test/macro_onset_r_Singing Voice': 0.6353515386581421,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [None],
+ 'presets': ['enstdrums_dtp']},
+ [ { 'test/(enstdrums)offset_f': nan,
+ 'test/(enstdrums)offset_p': 0.0,
+ 'test/(enstdrums)offset_r': nan,
+ 'test/(enstdrums)onset_f': nan,
+ 'test/(enstdrums)onset_f_drum': 0.8802791833877563,
+ 'test/(enstdrums)onset_p': 0.0,
+ 'test/(enstdrums)onset_p_drum': 0.8829243183135986,
+ 'test/(enstdrums)onset_r': nan,
+ 'test/(enstdrums)onset_r_drum': 0.8793073296546936,
+ 'test/macro_offset_f': nan,
+ 'test/macro_offset_p': 0.0,
+ 'test/macro_offset_r': nan,
+ 'test/macro_onset_f': nan,
+ 'test/macro_onset_f_drum': 0.8802791833877563,
+ 'test/macro_onset_p': 0.0,
+ 'test/macro_onset_p_drum': 0.8829243183135986,
+ 'test/macro_onset_r': nan,
+ 'test/macro_onset_r_drum': 0.8793073296546936}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [None],
+ 'presets': ['enstdrums_dtm']},
+ [ { 'test/(enstdrums)offset_f': nan,
+ 'test/(enstdrums)offset_p': 0.0,
+ 'test/(enstdrums)offset_r': nan,
+ 'test/(enstdrums)onset_f': nan,
+ 'test/(enstdrums)onset_f_drum': 0.8131747245788574,
+ 'test/(enstdrums)onset_p': 0.0,
+ 'test/(enstdrums)onset_p_drum': 0.8289992213249207,
+ 'test/(enstdrums)onset_r': nan,
+ 'test/(enstdrums)onset_r_drum': 0.8042031526565552,
+ 'test/macro_offset_f': nan,
+ 'test/macro_offset_p': 0.0,
+ 'test/macro_offset_r': nan,
+ 'test/macro_onset_f': nan,
+ 'test/macro_onset_f_drum': 0.8131747245788574,
+ 'test/macro_onset_p': 0.0,
+ 'test/macro_onset_p_drum': 0.8289992213249207,
+ 'test/macro_onset_r': nan,
+ 'test/macro_onset_r_drum': 0.8042031526565552}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [None],
+ 'presets': ['guitarset_pshift']},
+ [ { 'test/(guitarset)offset_f': 0.7842957973480225,
+ 'test/(guitarset)offset_p': 0.7916637063026428,
+ 'test/(guitarset)offset_r': 0.7788985967636108,
+ 'test/(guitarset)onset_f': 0.8876206874847412,
+ 'test/(guitarset)onset_f_drum': nan,
+ 'test/(guitarset)onset_p': 0.8953325748443604,
+ 'test/(guitarset)onset_p_drum': 0.0,
+ 'test/(guitarset)onset_r': 0.8822049498558044,
+ 'test/(guitarset)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.7842957973480225,
+ 'test/macro_offset_p': 0.7916637063026428,
+ 'test/macro_offset_r': 0.7788985967636108,
+ 'test/macro_onset_f': 0.8876206874847412,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.8953325748443604,
+ 'test/macro_onset_p_drum': 0.0,
+ 'test/macro_onset_r': 0.8822049498558044,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Bass': array([32, 33, 34, 35, 36, 37, 38, 39])}],
+ 'presets': ['rwc_pop_bass']},
+ [ { 'test/(rwc_pop)offset_f': 0.08935749530792236,
+ 'test/(rwc_pop)offset_f_Bass': 0.2769419252872467,
+ 'test/(rwc_pop)offset_p': 0.05388070270419121,
+ 'test/(rwc_pop)offset_p_Bass': 0.33313581347465515,
+ 'test/(rwc_pop)offset_r': 0.3093760907649994,
+ 'test/(rwc_pop)offset_r_Bass': 0.24953310191631317,
+ 'test/(rwc_pop)onset_f': 0.12388304620981216,
+ 'test/(rwc_pop)onset_f_Bass': 0.364982932806015,
+ 'test/(rwc_pop)onset_f_drum': nan,
+ 'test/(rwc_pop)onset_p': 0.07472005486488342,
+ 'test/(rwc_pop)onset_p_Bass': 0.438348650932312,
+ 'test/(rwc_pop)onset_p_drum': 0.0,
+ 'test/(rwc_pop)onset_r': 0.43698573112487793,
+ 'test/(rwc_pop)onset_r_Bass': 0.3284376561641693,
+ 'test/(rwc_pop)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.08935749530792236,
+ 'test/macro_offset_f_Bass': 0.2769419252872467,
+ 'test/macro_offset_p': 0.05388070270419121,
+ 'test/macro_offset_p_Bass': 0.33313581347465515,
+ 'test/macro_offset_r': 0.3093760907649994,
+ 'test/macro_offset_r_Bass': 0.24953310191631317,
+ 'test/macro_onset_f': 0.12388304620981216,
+ 'test/macro_onset_f_Bass': 0.364982932806015,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.07472005486488342,
+ 'test/macro_onset_p_Bass': 0.438348650932312,
+ 'test/macro_onset_p_drum': 0.0,
+ 'test/macro_onset_r': 0.43698573112487793,
+ 'test/macro_onset_r_Bass': 0.3284376561641693,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [{'Piano': array([0, 1, 2, 3, 4, 5, 6, 7])}],
+ 'presets': ['maestro']},
+ [ { 'test/(maestro)offset_f': 0.8026146292686462,
+ 'test/(maestro)offset_f_Piano': 0.8027480244636536,
+ 'test/(maestro)offset_p': 0.8284508585929871,
+ 'test/(maestro)offset_p_Piano': 0.8287417888641357,
+ 'test/(maestro)offset_r': 0.7789899706840515,
+ 'test/(maestro)offset_r_Piano': 0.7789827585220337,
+ 'test/(maestro)onset_f': 0.9469380974769592,
+ 'test/(maestro)onset_f_Piano': 0.9470942616462708,
+ 'test/(maestro)onset_f_drum': nan,
+ 'test/(maestro)onset_p': 0.9780700206756592,
+ 'test/(maestro)onset_p_Piano': 0.9784316420555115,
+ 'test/(maestro)onset_p_drum': 0.0,
+ 'test/(maestro)onset_r': 0.9185109734535217,
+ 'test/(maestro)onset_r_Piano': 0.9184830784797668,
+ 'test/(maestro)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.8026146292686462,
+ 'test/macro_offset_f_Piano': 0.8027480244636536,
+ 'test/macro_offset_p': 0.8284508585929871,
+ 'test/macro_offset_p_Piano': 0.8287417888641357,
+ 'test/macro_offset_r': 0.7789899706840515,
+ 'test/macro_offset_r_Piano': 0.7789827585220337,
+ 'test/macro_onset_f': 0.9469380974769592,
+ 'test/macro_onset_f_Piano': 0.9470942616462708,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.9780700206756592,
+ 'test/macro_onset_p_Piano': 0.9784316420555115,
+ 'test/macro_onset_p_drum': 0.0,
+ 'test/macro_onset_r': 0.9185109734535217,
+ 'test/macro_onset_r_Piano': 0.9184830784797668,
+ 'test/macro_onset_r_drum': nan}],
+ '-----------------------------------------------------------------',
+ { 'eval_drum_vocab': { 'Hi-Hat': [42, 44, 46, 22, 26],
+ 'Kick Drum': [36, 35],
+ 'Snare Drum': [38, 40]},
+ 'eval_vocab': [ { 'Bass': array([32, 33, 34, 35, 36, 37, 38, 39]),
+ 'Brass': array([56, 57, 58, 59, 60, 61, 62, 63]),
+ 'Chromatic Percussion': array([ 8, 9, 10, 11, 12, 13, 14, 15]),
+ 'Guitar': array([24, 25, 26, 27, 28, 29, 30, 31]),
+ 'Organ': array([16, 17, 18, 19, 20, 21, 22, 23]),
+ 'Piano': array([0, 1, 2, 3, 4, 5, 6, 7]),
+ 'Pipe': array([72, 73, 74, 75, 76, 77, 78, 79]),
+ 'Reed': array([64, 65, 66, 67, 68, 69, 70, 71]),
+ 'Strings': array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]),
+ 'Synth Lead': array([80, 81, 82, 83, 84, 85, 86, 87]),
+ 'Synth Pad': array([88, 89, 90, 91, 92, 93, 94, 95])}],
+ 'presets': ['urmp']},
+ [ { 'test/(urmp)offset_f': 0.6520498991012573,
+ 'test/(urmp)offset_f_Bass': nan,
+ 'test/(urmp)offset_f_Brass': 0.6848000288009644,
+ 'test/(urmp)offset_f_Chromatic Percussion': nan,
+ 'test/(urmp)offset_f_Guitar': nan,
+ 'test/(urmp)offset_f_Organ': nan,
+ 'test/(urmp)offset_f_Piano': nan,
+ 'test/(urmp)offset_f_Pipe': nan,
+ 'test/(urmp)offset_f_Reed': 0.10385715961456299,
+ 'test/(urmp)offset_f_Strings': 0.6481411457061768,
+ 'test/(urmp)offset_f_Synth Lead': nan,
+ 'test/(urmp)offset_f_Synth Pad': nan,
+ 'test/(urmp)offset_p': 0.6840448975563049,
+ 'test/(urmp)offset_p_Bass': nan,
+ 'test/(urmp)offset_p_Brass': 0.17973124980926514,
+ 'test/(urmp)offset_p_Chromatic Percussion': nan,
+ 'test/(urmp)offset_p_Guitar': 0.0,
+ 'test/(urmp)offset_p_Organ': nan,
+ 'test/(urmp)offset_p_Piano': nan,
+ 'test/(urmp)offset_p_Pipe': 0.0,
+ 'test/(urmp)offset_p_Reed': 0.0595238134264946,
+ 'test/(urmp)offset_p_Strings': 0.6781148910522461,
+ 'test/(urmp)offset_p_Synth Lead': nan,
+ 'test/(urmp)offset_p_Synth Pad': nan,
+ 'test/(urmp)offset_r': 0.6238455176353455,
+ 'test/(urmp)offset_r_Bass': nan,
+ 'test/(urmp)offset_r_Brass': 0.6537678241729736,
+ 'test/(urmp)offset_r_Chromatic Percussion': nan,
+ 'test/(urmp)offset_r_Guitar': nan,
+ 'test/(urmp)offset_r_Organ': nan,
+ 'test/(urmp)offset_r_Piano': nan,
+ 'test/(urmp)offset_r_Pipe': nan,
+ 'test/(urmp)offset_r_Reed': 0.06984802335500717,
+ 'test/(urmp)offset_r_Strings': 0.621437132358551,
+ 'test/(urmp)offset_r_Synth Lead': nan,
+ 'test/(urmp)offset_r_Synth Pad': nan,
+ 'test/(urmp)onset_f': 0.8094742298126221,
+ 'test/(urmp)onset_f_Bass': nan,
+ 'test/(urmp)onset_f_Brass': 0.865066647529602,
+ 'test/(urmp)onset_f_Chromatic Percussion': nan,
+ 'test/(urmp)onset_f_Guitar': nan,
+ 'test/(urmp)onset_f_Organ': nan,
+ 'test/(urmp)onset_f_Piano': nan,
+ 'test/(urmp)onset_f_Pipe': nan,
+ 'test/(urmp)onset_f_Reed': 0.1948803812265396,
+ 'test/(urmp)onset_f_Strings': 0.7926455736160278,
+ 'test/(urmp)onset_f_Synth Lead': nan,
+ 'test/(urmp)onset_f_Synth Pad': nan,
+ 'test/(urmp)onset_f_drum': nan,
+ 'test/(urmp)onset_p': 0.8502802848815918,
+ 'test/(urmp)onset_p_Bass': nan,
+ 'test/(urmp)onset_p_Brass': 0.22704367339611053,
+ 'test/(urmp)onset_p_Chromatic Percussion': nan,
+ 'test/(urmp)onset_p_Guitar': 0.0,
+ 'test/(urmp)onset_p_Organ': nan,
+ 'test/(urmp)onset_p_Piano': nan,
+ 'test/(urmp)onset_p_Pipe': 0.0,
+ 'test/(urmp)onset_p_Reed': 0.10924369841814041,
+ 'test/(urmp)onset_p_Strings': 0.8278226852416992,
+ 'test/(urmp)onset_p_Synth Lead': nan,
+ 'test/(urmp)onset_p_Synth Pad': nan,
+ 'test/(urmp)onset_p_drum': nan,
+ 'test/(urmp)onset_r': 0.7737197875976562,
+ 'test/(urmp)onset_r_Bass': nan,
+ 'test/(urmp)onset_r_Brass': 0.8258655667304993,
+ 'test/(urmp)onset_r_Chromatic Percussion': nan,
+ 'test/(urmp)onset_r_Guitar': nan,
+ 'test/(urmp)onset_r_Organ': nan,
+ 'test/(urmp)onset_r_Piano': nan,
+ 'test/(urmp)onset_r_Pipe': nan,
+ 'test/(urmp)onset_r_Reed': 0.1319148987531662,
+ 'test/(urmp)onset_r_Strings': 0.761354923248291,
+ 'test/(urmp)onset_r_Synth Lead': nan,
+ 'test/(urmp)onset_r_Synth Pad': nan,
+ 'test/(urmp)onset_r_drum': nan,
+ 'test/macro_offset_f': 0.6520498991012573,
+ 'test/macro_offset_f_Bass': nan,
+ 'test/macro_offset_f_Brass': 0.6848000288009644,
+ 'test/macro_offset_f_Chromatic Percussion': nan,
+ 'test/macro_offset_f_Guitar': nan,
+ 'test/macro_offset_f_Organ': nan,
+ 'test/macro_offset_f_Piano': nan,
+ 'test/macro_offset_f_Pipe': nan,
+ 'test/macro_offset_f_Reed': 0.10385715961456299,
+ 'test/macro_offset_f_Strings': 0.6481411457061768,
+ 'test/macro_offset_f_Synth Lead': nan,
+ 'test/macro_offset_f_Synth Pad': nan,
+ 'test/macro_offset_p': 0.6840448975563049,
+ 'test/macro_offset_p_Bass': nan,
+ 'test/macro_offset_p_Brass': 0.17973124980926514,
+ 'test/macro_offset_p_Chromatic Percussion': nan,
+ 'test/macro_offset_p_Guitar': 0.0,
+ 'test/macro_offset_p_Organ': nan,
+ 'test/macro_offset_p_Piano': nan,
+ 'test/macro_offset_p_Pipe': 0.0,
+ 'test/macro_offset_p_Reed': 0.0595238134264946,
+ 'test/macro_offset_p_Strings': 0.6781148910522461,
+ 'test/macro_offset_p_Synth Lead': nan,
+ 'test/macro_offset_p_Synth Pad': nan,
+ 'test/macro_offset_r': 0.6238455176353455,
+ 'test/macro_offset_r_Bass': nan,
+ 'test/macro_offset_r_Brass': 0.6537678241729736,
+ 'test/macro_offset_r_Chromatic Percussion': nan,
+ 'test/macro_offset_r_Guitar': nan,
+ 'test/macro_offset_r_Organ': nan,
+ 'test/macro_offset_r_Piano': nan,
+ 'test/macro_offset_r_Pipe': nan,
+ 'test/macro_offset_r_Reed': 0.06984802335500717,
+ 'test/macro_offset_r_Strings': 0.621437132358551,
+ 'test/macro_offset_r_Synth Lead': nan,
+ 'test/macro_offset_r_Synth Pad': nan,
+ 'test/macro_onset_f': 0.8094742298126221,
+ 'test/macro_onset_f_Bass': nan,
+ 'test/macro_onset_f_Brass': 0.865066647529602,
+ 'test/macro_onset_f_Chromatic Percussion': nan,
+ 'test/macro_onset_f_Guitar': nan,
+ 'test/macro_onset_f_Organ': nan,
+ 'test/macro_onset_f_Piano': nan,
+ 'test/macro_onset_f_Pipe': nan,
+ 'test/macro_onset_f_Reed': 0.1948803812265396,
+ 'test/macro_onset_f_Strings': 0.7926455736160278,
+ 'test/macro_onset_f_Synth Lead': nan,
+ 'test/macro_onset_f_Synth Pad': nan,
+ 'test/macro_onset_f_drum': nan,
+ 'test/macro_onset_p': 0.8502802848815918,
+ 'test/macro_onset_p_Bass': nan,
+ 'test/macro_onset_p_Brass': 0.22704367339611053,
+ 'test/macro_onset_p_Chromatic Percussion': nan,
+ 'test/macro_onset_p_Guitar': 0.0,
+ 'test/macro_onset_p_Organ': nan,
+ 'test/macro_onset_p_Piano': nan,
+ 'test/macro_onset_p_Pipe': 0.0,
+ 'test/macro_onset_p_Reed': 0.10924369841814041,
+ 'test/macro_onset_p_Strings': 0.8278226852416992,
+ 'test/macro_onset_p_Synth Lead': nan,
+ 'test/macro_onset_p_Synth Pad': nan,
+ 'test/macro_onset_p_drum': nan,
+ 'test/macro_onset_r': 0.7737197875976562,
+ 'test/macro_onset_r_Bass': nan,
+ 'test/macro_onset_r_Brass': 0.8258655667304993,
+ 'test/macro_onset_r_Chromatic Percussion': nan,
+ 'test/macro_onset_r_Guitar': nan,
+ 'test/macro_onset_r_Organ': nan,
+ 'test/macro_onset_r_Piano': nan,
+ 'test/macro_onset_r_Pipe': nan,
+ 'test/macro_onset_r_Reed': 0.1319148987531662,
+ 'test/macro_onset_r_Strings': 0.761354923248291,
+ 'test/macro_onset_r_Synth Lead': nan,
+ 'test/macro_onset_r_Synth Pad': nan,
+ 'test/macro_onset_r_drum': nan}]]
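
Across all of these dumps, `nan` marks instrument classes that never occur in a dataset's references, so any aggregation over per-instrument scores must be NaN-aware. A small illustrative helper (hypothetical, not part of the repository):

import math

def mean_ignoring_nan(scores: dict, prefix: str) -> float:
    # average metric values whose key starts with `prefix`, skipping nan entries
    vals = [v for k, v in scores.items() if k.startswith(prefix) and not math.isnan(v)]
    return sum(vals) / len(vals) if vals else float("nan")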
model/RoPE/RoPE.py ADDED
@@ -0,0 +1,306 @@
+ """rotary_embedding.py - Rotary Embedding based on https://github.com/lucidrains/rotary-embedding-torch"""
+ from typing import Literal, Union, Optional
+ from math import pi, log
+ from einops import rearrange, repeat
+
+ import torch
+ from torch.nn import Module, ModuleList
+ from torch.cuda.amp import autocast
+ from torch import nn, einsum, broadcast_tensors, Tensor
+
+
+ # helper functions
+ def exists(val):
+     return val is not None
+
+
+ def default(val, d):
+     return val if exists(val) else d
+
+
+ # broadcat, as tortoise-tts was using it
+ def broadcat(tensors, dim=-1):
+     broadcasted_tensors = broadcast_tensors(*tensors)
+     return torch.cat(broadcasted_tensors, dim=dim)
+
+
+ # rotary embedding helper functions
+ def rotate_half(x):
+     x = rearrange(x, '... (d r) -> ... d r', r=2)
+     x1, x2 = x.unbind(dim=-1)
+     x = torch.stack((-x2, x1), dim=-1)
+     return rearrange(x, '... d r -> ... (d r)')
+
+
+ @autocast(enabled=False)
+ def apply_rotary_emb(freqs, t, start_index=0, scale=1., seq_dim=-2):
+     """Applies rotary embedding to t (queries or keys), rotating the feature slice [start_index:start_index + rot_dim]."""
+     if t.ndim == 3:
+         seq_len = t.shape[seq_dim]
+         freqs = freqs[-seq_len:].to(t)
+
+     rot_dim = freqs.shape[-1]
+     end_index = start_index + rot_dim
+
+     assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
+
+     t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
+     t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
+     return torch.cat((t_left, t, t_right), dim=-1)
+
+
+ # learned rotation helpers
+ def apply_learned_rotations(rotations, t, start_index=0, freq_ranges=None):
+     if exists(freq_ranges):
+         rotations = einsum('..., f -> ... f', rotations, freq_ranges)
+         rotations = rearrange(rotations, '... r f -> ... (r f)')
+
+     rotations = repeat(rotations, '... n -> ... (n r)', r=2)
+     return apply_rotary_emb(rotations, t, start_index=start_index)
+
+
+ # classes
+ class RotaryEmbedding(Module):
+
+     def __init__(self,
+                  dim,
+                  custom_freqs: Optional[Tensor] = None,
+                  freqs_for: Union[Literal['lang'], Literal['pixel'], Literal['constant']] = 'lang',
+                  theta=10000,
+                  max_freq=10,
+                  num_freqs=1,
+                  learned_freq=False,
+                  use_xpos=False,
+                  xpos_scale_base=512,
+                  interpolate_factor=1.,
+                  theta_rescale_factor=1.,
+                  seq_before_head_dim=False,
+                  cache_if_possible=True):
+         super().__init__()
+         # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
+         # has some connection to NTK literature
+         # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
+
+         theta *= theta_rescale_factor**(dim / (dim - 2))
+
+         self.freqs_for = freqs_for
+
+         if exists(custom_freqs):
+             freqs = custom_freqs
+         elif freqs_for == 'lang':
+             freqs = 1. / (theta**(torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
+         elif freqs_for == 'pixel':
+             freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
+         elif freqs_for == 'constant':
+             freqs = torch.ones(num_freqs).float()
+
+         self.cache_if_possible = cache_if_possible
+
+         self.tmp_store('cached_freqs', None)
+         self.tmp_store('cached_scales', None)
+
+         self.freqs = nn.Parameter(freqs, requires_grad=learned_freq)
+
+         self.learned_freq = learned_freq
+
+         # dummy for device
+
+         self.tmp_store('dummy', torch.tensor(0))
+
+         # default sequence dimension
+
+         self.seq_before_head_dim = seq_before_head_dim
+         self.default_seq_dim = -3 if seq_before_head_dim else -2
+
+         # interpolation factors
+
+         assert interpolate_factor >= 1.
+         self.interpolate_factor = interpolate_factor
+
+         # xpos
+
+         self.use_xpos = use_xpos
+         if not use_xpos:
+             self.tmp_store('scale', None)
+             return
+
+         scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
+         self.scale_base = xpos_scale_base
+         self.tmp_store('scale', scale)
+
+     @property
+     def device(self):
+         return self.dummy.device
+
+     def tmp_store(self, key, value):
+         self.register_buffer(key, value, persistent=False)
+
+     def get_seq_pos(self, seq_len, device, dtype, offset=0):
+         return (torch.arange(seq_len, device=device, dtype=dtype) + offset) / self.interpolate_factor
+
+     def rotate_queries_or_keys(self, t, seq_dim=None, offset=0, freq_seq_len=None):
+         seq_dim = default(seq_dim, self.default_seq_dim)
+
+         assert not self.use_xpos, 'you must use `.rotate_queries_and_keys` method instead and pass in both queries and keys, for length extrapolatable rotary embeddings'
+
+         device, dtype, seq_len = t.device, t.dtype, t.shape[seq_dim]
+
+         if exists(freq_seq_len):
+             assert freq_seq_len >= seq_len
+             seq_len = freq_seq_len
+
+         freqs = self.forward(self.get_seq_pos(seq_len, device=device, dtype=dtype, offset=offset),
+                              seq_len=seq_len,
+                              offset=offset)
+
+         if seq_dim == -3:
+             freqs = rearrange(freqs, 'n d -> n 1 d')
+
+         return apply_rotary_emb(freqs, t, seq_dim=seq_dim)
+
+     def rotate_queries_with_cached_keys(self, q, k, seq_dim=None, offset=0):
+         seq_dim = default(seq_dim, self.default_seq_dim)
+
+         q_len, k_len = q.shape[seq_dim], k.shape[seq_dim]
+         assert q_len <= k_len
+         rotated_q = self.rotate_queries_or_keys(q, seq_dim=seq_dim, freq_seq_len=k_len)
+         rotated_k = self.rotate_queries_or_keys(k, seq_dim=seq_dim)
+
+         rotated_q = rotated_q.type(q.dtype)
+         rotated_k = rotated_k.type(k.dtype)
+
+         return rotated_q, rotated_k
+
+     def rotate_queries_and_keys(self, q, k, seq_dim=None):
+         seq_dim = default(seq_dim, self.default_seq_dim)
+
+         assert self.use_xpos
+         device, dtype, seq_len = q.device, q.dtype, q.shape[seq_dim]
+
+         seq = self.get_seq_pos(seq_len, dtype=dtype, device=device)
+
+         freqs = self.forward(seq, seq_len=seq_len)
+         scale = self.get_scale(seq, seq_len=seq_len).to(dtype)
+
+         if seq_dim == -3:
+             freqs = rearrange(freqs, 'n d -> n 1 d')
+             scale = rearrange(scale, 'n d -> n 1 d')
+
+         rotated_q = apply_rotary_emb(freqs, q, scale=scale, seq_dim=seq_dim)
+         rotated_k = apply_rotary_emb(freqs, k, scale=scale**-1, seq_dim=seq_dim)
+
+         rotated_q = rotated_q.type(q.dtype)
+         rotated_k = rotated_k.type(k.dtype)
+
+         return rotated_q, rotated_k
+
+     def get_scale(self, t: Tensor, seq_len: Optional[int] = None, offset=0):
+         assert self.use_xpos
+
+         should_cache = (self.cache_if_possible and exists(seq_len))
+
+         if (
+                 should_cache and \
+                 exists(self.cached_scales) and \
+                 (seq_len + offset) <= self.cached_scales.shape[0]
+         ):
+             return self.cached_scales[offset:(offset + seq_len)]
+
+         scale = 1.
+         if self.use_xpos:
+             power = (t - len(t) // 2) / self.scale_base
+             scale = self.scale**rearrange(power, 'n -> n 1')
+             scale = torch.cat((scale, scale), dim=-1)
+
+         if should_cache:
+             self.tmp_store('cached_scales', scale)
+
+         return scale
+
+     def get_axial_freqs(self, *dims):
+         Colon = slice(None)
+         all_freqs = []
+
+         for ind, dim in enumerate(dims):
+             if self.freqs_for == 'pixel':
+                 pos = torch.linspace(-1, 1, steps=dim, device=self.device)
+             else:
+                 pos = torch.arange(dim, device=self.device)
+
+             freqs = self.forward(pos, seq_len=dim)
+
+             all_axis = [None] * len(dims)
+             all_axis[ind] = Colon
+
+             new_axis_slice = (Ellipsis, *all_axis, Colon)
+             all_freqs.append(freqs[new_axis_slice])
+
+         all_freqs = broadcast_tensors(*all_freqs)
+         return torch.cat(all_freqs, dim=-1)
+
+     @autocast(enabled=False)
+     def forward(self, t: Tensor, seq_len=None, offset=0):
+         should_cache = (
+             self.cache_if_possible and \
+             not self.learned_freq and \
+             exists(seq_len) and \
+             self.freqs_for != 'pixel'
+         )
+
+         if (
+                 should_cache and \
+                 exists(self.cached_freqs) and \
+                 (offset + seq_len) <= self.cached_freqs.shape[0]
+         ):
+             return self.cached_freqs[offset:(offset + seq_len)].detach()
+
+         freqs = self.freqs
+
+         freqs = einsum('..., f -> ... f', t.type(freqs.dtype), freqs)
+         freqs = repeat(freqs, '... n -> ... (n r)', r=2)
+
+         if should_cache:
+             self.tmp_store('cached_freqs', freqs.detach())
+
+         return freqs
+
+     # custom method for applying rotary embeddings
+     @torch.compiler.disable
+     def apply_rotary_custom(self, t: torch.Tensor):
+         """Apply rotary embeddings to a single tensor of queries or keys.
+         Depending on the freqs type ('lang' or 'pixel'), the rotation will be different."""
+         if self.freqs_for == 'lang':
+             return self.rotate_queries_or_keys(t, seq_dim=-2)
+         elif self.freqs_for == 'pixel':
+             return apply_rotary_emb(self.get_axial_freqs(t.shape[-2]), t)
+         else:
+             raise ValueError(f"freqs_for must be 'lang' or 'pixel', but got {self.freqs_for}")
+
+
+ def test_rotary_embedding_lang():
+     d = 32  # d by head
+     q = torch.ones(1, 4, 110, 32)  # (B, H, T, D) for multi-head attention
+     rdim = d // 2  # will do a partial rotation on half, or d
+
+     rotary = RotaryEmbedding(dim=rdim, freqs_for="lang")
+     q = rotary.rotate_queries_or_keys(q, seq_dim=-2)
+
+     # visualize
+     import matplotlib.pyplot as plt
+     plt.imshow(q[0, 0, :, :].numpy().T, origin='lower')
+
+
+ def test_rotary_embedding_pixel():
+     d = 32  # d by head
+     q = torch.ones(1, 4, 128, 32)  # (B*T, H, F, C/H) for multi-head attention
+     rdim = d // 2  # will do a partial rotation on half
+
+     rotary = RotaryEmbedding(dim=rdim, freqs_for="pixel", max_freq=10)
+     freqs = rotary.get_axial_freqs(128)
+
+     q = apply_rotary_emb(freqs, q)  # also k, if needed
+
+     # visualize
+     import matplotlib.pyplot as plt
+     plt.imshow(q[0, 0, :, :].numpy().T, origin='lower')
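
For orientation, RotaryEmbedding above is typically applied to queries and keys after head splitting; a minimal usage sketch (shapes assumed for illustration, not an exact excerpt from the model code):

import torch
from model.RoPE.RoPE import RotaryEmbedding

rotary = RotaryEmbedding(dim=16, freqs_for='lang')  # partial rotation of head_dim=32
q = torch.randn(2, 8, 110, 32)  # (B, heads, T, head_dim)
k = torch.randn(2, 8, 110, 32)
q, k = rotary.apply_rotary_custom(q), rotary.apply_rotary_custom(k)
attn = torch.softmax(q @ k.transpose(-2, -1) / 32**0.5, dim=-1)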
model/RoPE/__pycache__/RoPE.cpython-310.pyc ADDED
Binary file (8.58 kB).
model/__pycache__/conformer_helper.cpython-310.pyc ADDED
Binary file (7.91 kB).
model/__pycache__/perceiver_helper.cpython-310.pyc ADDED
Binary file (13.7 kB).
model/__pycache__/perceiver_mod.cpython-310.pyc ADDED
Binary file (20.5 kB).
model/__pycache__/pitchshift_layer.cpython-310.pyc ADDED
Binary file (4.34 kB).
model/__pycache__/positional_encoding.cpython-310.pyc ADDED
Binary file (11.2 kB).
model/__pycache__/t5mod.cpython-310.pyc ADDED
Binary file (14 kB).
model/__pycache__/t5mod_helper.cpython-310.pyc ADDED
Binary file (3.6 kB).
model/conv_block.py ADDED
@@ -0,0 +1,217 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ from typing import Literal
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from einops import rearrange
+
+
+ def init_layer(layer: nn.Module) -> None:
+     """Initialize a Linear or Convolutional layer."""
+     nn.init.xavier_uniform_(layer.weight)
+     if hasattr(layer, "bias") and layer.bias is not None:
+         layer.bias.data.zero_()
+
+
+ def init_bn(bn: nn.Module) -> None:
+     """Initialize a Batchnorm layer."""
+     bn.bias.data.zero_()
+     bn.weight.data.fill_(1.0)
+     bn.running_mean.data.zero_()
+     bn.running_var.data.fill_(1.0)
+
+
+ def act(x: torch.Tensor, activation: str) -> torch.Tensor:
+     """Activation function."""
+     funcs = {"relu": F.relu_, "leaky_relu": lambda x: F.leaky_relu_(x, 0.01), "swish": lambda x: x * torch.sigmoid(x)}
+     if activation not in funcs:  # fixed: the original returned an Exception object instead of raising one
+         raise ValueError("Incorrect activation!")
+     return funcs[activation](x)
+
+
+ class Res2DAVPBlock(nn.Module):
+
+     def __init__(self, in_channels, out_channels, kernel_size, avp_kernel_size, activation):
+         """Convolutional residual block modified from bytedance/music_source_separation."""
+         super().__init__()
+
+         padding = kernel_size[0] // 2, kernel_size[1] // 2
+
+         self.activation = activation
+         self.bn1, self.bn2 = nn.BatchNorm2d(out_channels), nn.BatchNorm2d(out_channels)
+
+         self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=False)
+         self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding, bias=False)
+
+         self.is_shortcut = in_channels != out_channels
+         if self.is_shortcut:
+             self.shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1))
+
+         self.avp = nn.AvgPool2d(avp_kernel_size)
+         self.init_weights()
+
+     def init_weights(self):
+         for m in [self.conv1, self.conv2] + ([self.shortcut] if self.is_shortcut else []):
+             init_layer(m)
+         for m in [self.bn1, self.bn2]:
+             init_bn(m)
+
+     def forward(self, x):
+         origin = x
+         x = act(self.bn1(self.conv1(x)), self.activation)
+         x = self.bn2(self.conv2(x))
+         x += self.shortcut(origin) if self.is_shortcut else origin
+         x = act(x, self.activation)
+         return self.avp(x)
+
+
+ class PreEncoderBlockRes3B(nn.Module):
+
+     def __init__(self, in_channels, out_channels, kernel_size=(3, 3), avp_kernel_size=(1, 2), activation='relu'):
+         """Pre-Encoder with 3 Res2DAVPBlocks."""
+         super().__init__()
+
+         self.blocks = nn.ModuleList([
+             Res2DAVPBlock(in_channels if i == 0 else out_channels, out_channels, kernel_size, avp_kernel_size,
+                           activation) for i in range(3)
+         ])
+
+     def forward(self, x):  # (B, T, F)
+         x = rearrange(x, 'b t f -> b 1 t f')
+         for block in self.blocks:
+             x = block(x)
+         return rearrange(x, 'b c t f -> b t f c')
+
+
+ def test_res3b():
+     # mel-spec input
+     x = torch.randn(2, 256, 512)  # (B, T, F)
+     pre = PreEncoderBlockRes3B(in_channels=1, out_channels=128)
+     x = pre(x)  # (2, 256, 64, 128): B,T,F,C
+
+     x = torch.randn(2, 110, 1024)  # (B, T, F)
+     pre = PreEncoderBlockRes3B(in_channels=1, out_channels=128)
+     x = pre(x)  # (2, 110, 128, 128): B,T,F,C
+
+
+ # ====================================================================================================================
+ # PreEncoderBlockHFTT: hFT-Transformer-like Pre-encoder
+ # ====================================================================================================================
+ class PreEncoderBlockHFTT(nn.Module):
+
+     def __init__(self, margin_pre=15, margin_post=16) -> None:
+         """Pre-Encoder with hFT-Transformer-like convolutions."""
+         super().__init__()
+
+         self.margin_pre, self.margin_post = margin_pre, margin_post
+         self.conv = nn.Conv2d(1, 4, kernel_size=(1, 5), padding='same', padding_mode='zeros')
+         self.emb_freq = nn.Linear(128, 128)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         # x: (B, T, F)
+         x = rearrange(x, 'b t f -> b 1 f t')  # (B, 1, F, T) or (2, 1, 128, 110)
+         x = F.pad(x, (self.margin_pre, self.margin_post), value=1e-7)  # (B, 1, F, T+margin) or (2,1,128,141)
120
+ x = self.conv(x) # (B, C, F, T+margin) or (2, 4, 128, 141)
121
+ x = x.unfold(dimension=3, size=32, step=1) # (B, c1, T, F, c2) or (2, 4, 128, 110, 32)
122
+ x = rearrange(x, 'b c1 f t c2 -> b t f (c1 c2)') # (B, T, F, C) or (2, 110, 128, 128)
123
+ return self.emb_freq(x) # (B, T, F, C) or (2, 110, 128, 128)
124
+
125
+
126
+ def test_hftt():
127
+ # from model.spectrogram import get_spectrogram_layer_from_audio_cfg
128
+ # from config.config import audio_cfg as default_audio_cfg
129
+ # audio_cfg = default_audio_cfg
130
+ # audio_cfg['codec'] = 'melspec'
131
+ # audio_cfg['hop_length'] = 300
132
+ # audio_cfg['n_mels'] = 128
133
+ # x = torch.randn(2, 1, 32767)
134
+ # mspec, _ = get_spectrogram_layer_from_audio_cfg(audio_cfg)
135
+ # x = mspec(x)
136
+ x = torch.randn(2, 110, 128) # (B, T, F)
137
+ pre_enc_hftt = PreEncoderBlockHFTT()
138
+ y = pre_enc_hftt(x) # (2, 110, 128, 128): B, T, F, C
139
+
140
+
141
+ # ====================================================================================================================
142
+ # PreEncoderBlockRes3BHFTT: hFT-Transformer-like Pre-encoder with Res2DAVPBlock and spec input
143
+ # ====================================================================================================================
144
+ class PreEncoderBlockRes3BHFTT(nn.Module):
145
+
146
+ def __init__(self, margin_pre: int = 15, margin_post: int = 16) -> None:
147
+ """Pre-Encoder with hFT-Transformer-like convolutions.
148
+
149
+ Args:
150
+ margin_pre (int): padding before the input
151
+ margin_post (int): padding after the input
152
+ stack_dim (Literal['c', 'f']): stack dimension. channel or frequency
153
+
154
+ """
155
+ super().__init__()
156
+ self.margin_pre, self.margin_post = margin_pre, margin_post
157
+ self.res3b = PreEncoderBlockRes3B(in_channels=1, out_channels=4)
158
+ self.emb_freq = nn.Linear(128, 128)
159
+
160
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
161
+ # x: (B, T, F) or (2, 110, 1024), input spectrogram
162
+ x = rearrange(x, 'b t f -> b f t') # (2, 1024, 110): B,F,T
163
+ x = F.pad(x, (self.margin_pre, self.margin_post), value=1e-7) # (2, 1024, 141): B,F,T+margin
164
+ x = rearrange(x, 'b f t -> b t f') # (2, 141, 1024): B,T+margin,F
165
+ x = self.res3b(x) # (2, 141, 128, 4): B,T+margin,F,C
166
+ x = x.unfold(dimension=1, size=32, step=1) # (B, T, F, C1, C2) or (2, 110, 128, 4, 32)
167
+ x = rearrange(x, 'b t f c1 c2 -> b t f (c1 c2)') # (B, T, F, C) or (2, 110, 128, 128)
168
+ return self.emb_freq(x) # (B, T, F, C) or (2, 110, 128, 128)
169
+
170
+
171
+ def test_res3b_hftt():
172
+ # from model.spectrogram import get_spectrogram_layer_from_audio_cfg
173
+ # from config.config import audio_cfg as default_audio_cfg
174
+ # audio_cfg = default_audio_cfg
175
+ # audio_cfg['codec'] = 'spec'
176
+ # audio_cfg['hop_length'] = 300
177
+ # x = torch.randn(2, 1, 32767)
178
+ # spec, _ = get_spectrogram_layer_from_audio_cfg(audio_cfg)
179
+ # x = spec(x) # (2, 110, 1024): B,T,F
180
+ x = torch.randn(2, 110, 1024) # (B, T, F)
181
+ pre_enc_res3b_hftt = PreEncoderBlockRes3BHFTT()
182
+ y = pre_enc_res3b_hftt(x) # (2, 110, 128, 128): B, T, F, C
183
+
184
+
185
+ # # ====================================================================================================================
186
+ # # PreEncoderBlockConv1D: Pre-encoder without activation, with Melspec input
187
+ # # ====================================================================================================================
188
+ # class PreEncoderBlockConv1D(nn.Module):
189
+
190
+ # def __init__(self,
191
+ # in_channels,
192
+ # out_channels,
193
+ # kernel_size=3) -> None:
194
+ # """Pre-Encoder with 1D convolution."""
195
+ # super().__init__()
196
+ # self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, stride=1)
197
+ # self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size, stride=1)
198
+
199
+ # def forward(self, x: torch.Tensor) -> torch.Tensor:
200
+ # # x: (B, T, F) or (2, 128, 256), input melspec
201
+ # x = rearrange(x, 'b t f -> b f t') # (2, 256, 128): B,F,T
202
+ # x = self.conv1(x) # (2, 128, 128): B,F,T
203
+ # return rearrange(x, 'b f t -> b t f') # (2, 110, 128): B,T,F
204
+
205
+ # def test_conv1d():
206
+ # # from model.spectrogram import get_spectrogram_layer_from_audio_cfg
207
+ # # from config.config import audio_cfg as default_audio_cfg
208
+ # # audio_cfg = default_audio_cfg
209
+ # # audio_cfg['codec'] = 'melspec'
210
+ # # audio_cfg['hop_length'] = 256
211
+ # # audio_cfg['n_mels'] = 512
212
+ # # x = torch.randn(2, 1, 32767)
213
+ # # mspec, _ = get_spectrogram_layer_from_audio_cfg(audio_cfg)
214
+ # # x = mspec(x)
215
+ # x = torch.randn(2, 128, 128) # (B, T, F)
216
+ # pre_enc_conv1d = PreEncoderBlockConv1D(in_channels=1, out_channels=128)
217
+ # y = pre_enc_conv1d(x) # (2, 110, 128, 128): B, T, F, C
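For reference, a minimal sketch of how the two active pre-encoders compose on a 2.048 s batch (shapes are the ones asserted in the test functions above; nothing beyond this file is assumed):

    import torch
    from model.conv_block import PreEncoderBlockRes3B, PreEncoderBlockHFTT

    spec = torch.randn(2, 110, 1024)  # (B, T, F) linear spectrogram, hop=300
    mel = torch.randn(2, 110, 128)    # (B, T, F) mel spectrogram

    res3b = PreEncoderBlockRes3B(in_channels=1, out_channels=128)
    hftt = PreEncoderBlockHFTT()

    assert res3b(spec).shape == (2, 110, 128, 128)  # (B, T, F', C): F pooled 1024 -> 128
    assert hftt(mel).shape == (2, 110, 128, 128)    # (B, T, F, C): channels from unfolding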
model/lr_scheduler.py ADDED
@@ -0,0 +1,91 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """lr_scheduler.py"""
+ import torch
+ from typing import Dict, Optional
+
+
+ def get_lr_scheduler(optimizer: torch.optim.Optimizer, scheduler_name: str, base_lr: float, scheduler_cfg: Dict):
+
+     if scheduler_name.lower() == 'cosine':
+         from torch.optim.lr_scheduler import (
+             SequentialLR,
+             LinearLR,
+             CosineAnnealingLR,
+         )
+
+         scheduler1 = LinearLR(
+             optimizer,
+             start_factor=0.5,
+             end_factor=1,
+             total_iters=scheduler_cfg["warmup_steps"],
+             last_epoch=-1,
+         )
+
+         scheduler2 = CosineAnnealingLR(
+             optimizer,
+             T_max=scheduler_cfg["total_steps"] - scheduler_cfg["warmup_steps"],
+             eta_min=scheduler_cfg["final_cosine"],
+         )
+
+         lr_scheduler = SequentialLR(optimizer,
+                                     schedulers=[scheduler1, scheduler2],
+                                     milestones=[scheduler_cfg["warmup_steps"]])
+     elif scheduler_name.lower() == 'legacy':
+         import math
+         from torch.optim.lr_scheduler import (
+             SequentialLR,
+             LinearLR,
+             LambdaLR,
+         )
+
+         msg = "You are using the T5 legacy LR schedule; it is independent of optim.base_lr"
+         print(msg)
+
+         num_steps_optimizer1 = math.ceil(scheduler_cfg["total_steps"] * 0.9)
+         iters_left_for_optimizer2 = scheduler_cfg["total_steps"] - num_steps_optimizer1
+
+         scheduler1 = LambdaLR(optimizer, lambda step: min(base_lr, 1.0 / math.sqrt(step)) / base_lr
+                               if step else base_lr / base_lr)
+
+         scheduler2 = LinearLR(optimizer,
+                               start_factor=(min(base_lr, 1.0 / math.sqrt(num_steps_optimizer1)) / base_lr),
+                               end_factor=0,
+                               total_iters=iters_left_for_optimizer2,
+                               last_epoch=-1)
+
+         lr_scheduler = SequentialLR(
+             optimizer,
+             schedulers=[scheduler1, scheduler2],
+             milestones=[num_steps_optimizer1],
+         )
+     elif scheduler_name.lower() == 'constant':
+         from transformers import get_scheduler
+         lr_scheduler = get_scheduler(
+             name=scheduler_name.lower(),
+             optimizer=optimizer,
+         )
+     else:
+         raise NotImplementedError
+
+     return lr_scheduler
+
+
+ def extra_stats(args, model, optimizer):
+     stats = {}
+
+     if args.logging.weights_l2:
+         weights_l2 = sum(p.detach().norm(2).item()**2 for p in model.parameters())**0.5
+         stats['weights_l2'] = weights_l2
+
+     cur_lr = optimizer.param_groups[0]['lr']
+     stats['lr'] = cur_lr
+
+     return stats
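A minimal usage sketch for the 'cosine' branch above (the model, base_lr, and step counts here are placeholder assumptions; the scheduler_cfg keys are the ones the function reads):

    import torch
    from model.lr_scheduler import get_lr_scheduler

    model = torch.nn.Linear(8, 8)
    opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
    cfg = {"warmup_steps": 1000, "total_steps": 100000, "final_cosine": 1e-5}
    sched = get_lr_scheduler(opt, "cosine", base_lr=1e-3, scheduler_cfg=cfg)

    for step in range(10):  # one scheduler step per optimizer step
        opt.step()
        sched.step()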
model/optimizers.py ADDED
@@ -0,0 +1,218 @@
+ """ optimizers.py
+
+ Code based on nanoT5 project:
+ https://github.com/PiotrNawrot/nanoT5/blob/main/nanoT5/utils/copied_utils.py
+
+ + D-adapt Adam from https://github.com/facebookresearch/dadaptation
+ """
+ import importlib
+ import math
+ import torch
+
+ from typing import Iterable, Tuple
+ from torch import nn
+ from torch.optim import Optimizer
+ from transformers import Adafactor
+ from torch.optim import AdamW
+
+
+ class AdamWScale(Optimizer):
+     """
+     This AdamW implementation is copied from Huggingface.
+     We modified it with Adagrad scaling by rms of a weight tensor
+
+     Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
+     Regularization](https://arxiv.org/abs/1711.05101).
+
+     Parameters:
+         params (`Iterable[nn.parameter.Parameter]`):
+             Iterable of parameters to optimize or dictionaries defining parameter groups.
+         lr (`float`, *optional*, defaults to 1e-3):
+             The learning rate to use.
+         betas (`Tuple[float,float]`, *optional*, defaults to (0.9, 0.999)):
+             Adam's betas parameters (b1, b2).
+         eps (`float`, *optional*, defaults to 1e-6):
+             Adam's epsilon for numerical stability.
+         weight_decay (`float`, *optional*, defaults to 0):
+             Decoupled weight decay to apply.
+         correct_bias (`bool`, *optional*, defaults to `True`):
+             Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
+         no_deprecation_warning (`bool`, *optional*, defaults to `False`):
+             A flag used to disable the deprecation warning (set to `True` to disable the warning).
+     """
+
+     def __init__(
+         self,
+         params: Iterable[nn.parameter.Parameter],
+         lr: float = 1e-3,
+         betas: Tuple[float, float] = (0.9, 0.999),
+         eps: float = 1e-6,
+         weight_decay: float = 0.0,
+         correct_bias: bool = True,
+     ):
+         if lr < 0.0:
+             raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
+         if not 0.0 <= betas[0] < 1.0:
+             raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
+         if not 0.0 <= betas[1] < 1.0:
+             raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
+         if not 0.0 <= eps:
+             raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
+         defaults = dict(
+             lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
+         super().__init__(params, defaults)
+
+     @staticmethod
+     def _rms(tensor):
+         return tensor.norm(2) / (tensor.numel()**0.5)
+
+     def step(self, closure=None):
+         """
+         Performs a single optimization step.
+
+         Arguments:
+             closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
+         """
+         loss = None
+         if closure is not None:
+             loss = closure()
+
+         for group in self.param_groups:
+             for p in group["params"]:
+                 if p.grad is None:
+                     continue
+                 grad = p.grad.data
+                 if grad.is_sparse:
+                     raise RuntimeError(
+                         "Adam does not support sparse gradients, please consider SparseAdam instead"
+                     )
+
+                 state = self.state[p]
+                 beta1, beta2 = group["betas"]
+
+                 # State initialization
+                 if len(state) == 0:
+                     state["step"] = 0
+                     # Exponential moving average of gradient values
+                     state["exp_avg"] = torch.zeros_like(p.data)
+                     # Exponential moving average of squared gradient values
+                     state["exp_avg_sq"] = torch.zeros_like(p.data)
+
+                 exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
+
+                 state["step"] += 1
+
+                 # Decay the first and second moment running average coefficient
+                 # In-place operations to update the averages at the same time
+                 exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
+                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
+                 denom = exp_avg_sq.sqrt().add_(group["eps"])
+
+                 step_size = group["lr"]
+                 if group["correct_bias"]:  # No bias correction for Bert
+                     bias_correction1 = 1.0 - beta1**state["step"]
+                     bias_correction2 = 1.0 - beta2**state["step"]
+                     step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
+
+                 # /Adapt Step from Adagrad
+                 step_size = step_size * max(1e-3, self._rms(p.data))
+                 # /Adapt Step from Adagrad
+
+                 p.data.addcdiv_(exp_avg, denom, value=-step_size)
+
+                 # Just adding the square of the weights to the loss function is *not*
+                 # the correct way of using L2 regularization/weight decay with Adam,
+                 # since that will interact with the m and v parameters in strange ways.
+                 #
+                 # Instead we want to decay the weights in a manner that doesn't interact
+                 # with the m/v parameters. This is equivalent to adding the square
+                 # of the weights to the loss with plain (non-momentum) SGD.
+                 # Add weight decay at the end (fixed version)
+                 if group["weight_decay"] > 0.0:
+                     p.data.add_(p.data, alpha=(-group["lr"] * group["weight_decay"]))
+
+         return loss
+
+
+ # def get_optimizer(models_dict: nn.ModuleDict,
+ #                   optimizer_name: str,
+ #                   base_lr: float,
+ #                   weight_decay: float = 0.):
+
+ #     no_decay = [
+ #         "bias", "LayerNorm", "layernorm", "layer_norm", "ln", "BatchNorm", "bn", "batch_norm",
+ #         "batchnorm"
+ #     ]
+
+ #     optimizer_grouped_parameters = []
+ #     for name, current_model in models_dict.items():
+ #         if current_model is None:
+ #             continue
+ #         optimizer_grouped_parameters += [
+ #             {
+ #                 "params": [
+ #                     p for n, p in current_model.named_parameters()
+ #                     if not any(nd in n for nd in no_decay)
+ #                 ],
+ #                 "weight_decay": weight_decay,
+ #             },
+ #             {
+ #                 "params": [
+ #                     p for n, p in current_model.named_parameters()
+ #                     if any(nd in n for nd in no_decay)
+ #                 ],
+ #                 "weight_decay": 0.0,
+ #             },
+ #         ]
+ def get_optimizer(models_dict: nn.ModuleDict,
+                   optimizer_name: str,
+                   base_lr: float,
+                   weight_decay: float = 0.):
+
+     no_decay = [
+         "bias", "LayerNorm", "layernorm", "layer_norm", "ln", "BatchNorm", "bn", "batch_norm",
+         "batchnorm"
+     ]
+     optimizer_grouped_parameters = []
+     for n, p in models_dict:
+         # drop pitch shifter
+         if 'pshifters' in n:
+             continue
+         # no weight decay for bias/normalization parameters (substring match on the name)
+         if any(nd in n for nd in no_decay):
+             optimizer_grouped_parameters.append({"params": [p], "weight_decay": 0.0})
+         else:
+             optimizer_grouped_parameters.append({"params": [p], "weight_decay": weight_decay})
+
+     if optimizer_name.lower() == 'adamw':
+         base_lr = 1e-03 if base_lr is None else float(base_lr)
+         opt = AdamW(optimizer_grouped_parameters, lr=base_lr)
+     elif optimizer_name.lower() == 'adafactor':
+         if base_lr is None:
+             opt = Adafactor(
+                 optimizer_grouped_parameters,
+                 lr=None,
+                 scale_parameter=True,
+                 relative_step=True,
+                 warmup_init=True)
+         else:
+             opt = Adafactor(optimizer_grouped_parameters, lr=base_lr, relative_step=False)
+     elif optimizer_name.lower() == 'adamwscale':
+         base_lr = 1e-02 if base_lr is None else float(base_lr)
+         opt = AdamWScale(
+             optimizer_grouped_parameters,
+             lr=base_lr,
+         )
+     elif optimizer_name.lower() == 'cpuadam':
+         dspd = importlib.import_module('deepspeed')
+         base_lr = 1e-03 if base_lr is None else float(base_lr)
+         opt = dspd.ops.adam.cpu_adam.DeepSpeedCPUAdam(optimizer_grouped_parameters, lr=base_lr)
+     elif optimizer_name.lower() == 'dadaptadam':
+         dadaptation = importlib.import_module('dadaptation')
+         base_lr = 1.0 if base_lr is None else float(base_lr)
+         opt = dadaptation.DAdaptAdam(optimizer_grouped_parameters, lr=base_lr)
+     else:
+         raise NotImplementedError(optimizer_name)
+
+     return opt, base_lr
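A minimal calling sketch. Note that the loop above iterates (name, parameter) pairs, so despite the nn.ModuleDict annotation, passing model.named_parameters() is assumed here:

    import torch
    from model.optimizers import get_optimizer

    model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.LayerNorm(16))
    opt, base_lr = get_optimizer(model.named_parameters(), 'adamwscale',
                                 base_lr=None, weight_decay=0.01)
    print(type(opt).__name__, base_lr)  # -> AdamWScale 0.01 (default when base_lr is None)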
model/pitchshift_layer.py ADDED
@@ -0,0 +1,550 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """pitchshift_layer.py"""
+ # import math
+ import numpy as np
+ # from scipy import special
+ from einops import rearrange
+ from typing import Optional, Literal, Dict, List, Tuple, Callable
+
+ import torch
+ from torch import nn
+ import torchaudio
+ from torchaudio import transforms
+ # from torchaudio import functional as F
+ # from torchaudio.functional.functional import (
+ #     _fix_waveform_shape,
+ #     _stretch_waveform,
+ # )
+ # from model.ops import adjust_b_to_gcd, check_all_elements_equal
+
+
+ class PitchShiftLayer(nn.Module):
+     """Applying batch-wise pitch-shift to time-domain audio signals.
+
+     Args:
+         pshift_range (List[int]): Range of pitch shift in semitones. Default: ``[-2, 2]``.
+         resample_source_fs (int): Default is 4000.
+         stretch_n_fft (int): Default is 512.
+         window: (Optional[Literal['kaiser']]) Default is None.
+         beta: (Optional[float]): Parameter for 'kaiser' filter. Default: None.
+     """
+
+     def __init__(
+         self,
+         pshift_range: List[int] = [-2, 2],
+         resample_source_fs: int = 4000,
+         stretch_n_fft: int = 512,
+         win_length: Optional[int] = None,
+         hop_length: Optional[int] = None,
+         window: Optional[Literal['kaiser']] = None,
+         beta: Optional[float] = None,
+         expected_input_shape: Optional[Tuple[int]] = None,
+         device: Optional[torch.device] = None,
+         **kwargs,
+     ) -> None:
+         super().__init__()
+         self.pshift_range = pshift_range
+         self.resample_source_fs = resample_source_fs
+         self.stretch_n_fft = stretch_n_fft
+         self.win_length = win_length
+         self.hop_length = hop_length
+
+         if window is None:
+             self.window_fn = torch.hann_window
+             self.window_kwargs = None
+         elif 'kaiser' in window:
+
+             def custom_kaiser_window(window_length, beta, **kwargs):
+                 return torch.kaiser_window(window_length, periodic=True, beta=beta, **kwargs)
+
+             self.window_fn = custom_kaiser_window
+             self.window_kwargs = {'beta': beta}
+
+         # Initialize pitch shifters for every semitone
+         self.pshifters = None
+         self.frame_gaps = None
+         self._initialize_pshifters(expected_input_shape, device=device)
+         self.requires_grad_(False)
+
+     def _initialize_pshifters(self,
+                               expected_input_shape: Optional[Tuple[int]] = None,
+                               device: Optional[torch.device] = None) -> None:
+         # DDP requires initializing parameters with a dummy input
+         if expected_input_shape is not None:
+             if device is not None:
+                 dummy_input = torch.randn(expected_input_shape, requires_grad=False).to(device)
+             else:
+                 dummy_input = torch.randn(expected_input_shape, requires_grad=False)
+         else:
+             dummy_input = None
+
+         pshifters = nn.ModuleDict()
+         for semitone in range(self.pshift_range[0], self.pshift_range[1] + 1):
+             if semitone == 0:
+                 # No need to shift and resample
+                 pshifters[str(semitone)] = None
+             else:
+                 pshifter = transforms.PitchShift(self.resample_source_fs,
+                                                  n_steps=semitone,
+                                                  n_fft=self.stretch_n_fft,
+                                                  win_length=self.win_length,
+                                                  hop_length=self.hop_length,
+                                                  window_fn=self.window_fn,
+                                                  wkwargs=self.window_kwargs)
+                 pshifters[str(semitone)] = pshifter
+                 # Pass dummy input to initialize parameters
+                 with torch.no_grad():
+                     if dummy_input is not None:
+                         _ = pshifter.initialize_parameters(dummy_input)
+         self.pshifters = pshifters
+
+     def calculate_frame_gaps(self) -> Dict[int, float]:
+         """Calculate the expected gap between the original and the stretched audio."""
+         frame_gaps = {}  # for debugging
+         for semitone in range(self.pshift_range[0], self.pshift_range[1] + 1):
+             if semitone == 0:
+                 # No need to shift and resample
+                 frame_gaps[semitone] = 0.
+             else:
+                 pshifter = self.pshifters[str(semitone)]
+                 gap_in_ms = 1000. * (pshifter.kernel.shape[2] -
+                                      pshifter.kernel.shape[0] / 2.0**(-float(semitone) / 12)) / self.resample_source_fs
+                 frame_gaps[semitone] = gap_in_ms
+         return frame_gaps
+
+     @torch.no_grad()
+     def forward(self, x: torch.Tensor, semitone: int) -> torch.Tensor:
+         """
+         Args:
+             x (torch.Tensor): (B, 1, T) or (B, T)
+         Returns:
+             torch.Tensor: (B, 1, T) or (B, T)
+         """
+         if semitone == 0:
+             return x
+         elif semitone >= min(self.pshift_range) and semitone <= max(self.pshift_range):
+             return self.pshifters[str(semitone)](x)
+         else:
+             raise ValueError(f"semitone must be in range {self.pshift_range}")
+
+
+ def test_resampler_sinewave():
+     # x: {440Hz, 220Hz} sine wave at 16kHz
+     t = torch.arange(0, 2, 1 / 16000)  # 2 seconds at 16kHz
+     x0 = torch.sin(2 * torch.pi * 440 * t) * 0.5
+     x1 = torch.sin(2 * torch.pi * 220 * t) * 0.5
+     x = torch.stack((x0, x1), dim=0)  # (2, 32000)
+
+     # Resample
+     psl = PitchShiftLayer(pshift_range=[-2, 2], resample_source_fs=4000)
+     y = psl(x, 2)  # (2, 24000)
+
+     # Export to wav
+     torchaudio.save("x.wav", x, 16000, bits_per_sample=16)
+     torchaudio.save("y.wav", y, 12000, bits_per_sample=16)
+
+
+ # class Resampler(nn.Module):
+ #     """
+ #     Resampling using conv1d operations, more memory-efficient than torchaudio's resampler.
+
+ #     Based on Dan Povey's resampler.py:
+ #     https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
+ #     """
+
+ #     def __init__(self,
+ #                  input_sr: int,
+ #                  output_sr: int,
+ #                  dtype: torch.dtype = torch.float32,
+ #                  filter_width: int = 16,
+ #                  cutoff_ratio: float = 0.85,
+ #                  filter: Literal['kaiser', 'kaiser_best', 'kaiser_fast', 'hann'] = 'kaiser_fast',
+ #                  beta: float = 8.555504641634386) -> None:
+ #         super().__init__()  # init the base class
+ #         """
+ #         Initialize the Resampler.
+
+ #         Args:
+ #           - input_sr (int): Input sampling rate.
+ #           - output_sr (int): Output sampling rate.
+ #           - dtype (torch.dtype): Computation data type. Default: torch.float32.
+ #           - filter_width (int): Number of zeros per side in the sinc function. Default: 16.
+ #           - cutoff_ratio (float): Filter rolloff point as a fraction of Nyquist freq. Default: 0.85.
+ #           - filter (str): Filter type. One of ['kaiser', 'kaiser_best', 'kaiser_fast', 'hann']. Default: 'kaiser_fast'.
+ #           - beta (float): Parameter for 'kaiser' filter. Default: 8.555504641634386.
+
+ #         Note: Ratio between input_sr and output_sr should be reduced to simplest form.
+ #         """
+ #         assert isinstance(input_sr, int) and isinstance(output_sr, int)
+ #         if input_sr == output_sr:
+ #             self.resample_type = 'trivial'
+ #             return
+
+ #         d = math.gcd(input_sr, output_sr)
+ #         input_sr, output_sr = input_sr // d, output_sr // d
+
+ #         assert dtype in [torch.float32, torch.float64]
+ #         assert filter_width > 3  # a reasonable bare minimum
+ #         np_dtype = np.float32 if dtype == torch.float32 else np.float64
+
+ #         assert filter in ['hann', 'kaiser', 'kaiser_best', 'kaiser_fast']
+
+ #         if filter == 'kaiser_best':
+ #             filter_width = 64
+ #             beta = 14.769656459379492
+ #             cutoff_ratio = 0.9475937167399596
+ #             filter = 'kaiser'
+ #         elif filter == 'kaiser_fast':
+ #             filter_width = 16
+ #             beta = 8.555504641634386
+ #             cutoff_ratio = 0.85
+ #             filter = 'kaiser'
+ #         """
+ #         - Define a sample 'block' correlating `input_sr` input samples to `output_sr` output samples.
+ #         - Dividing samples into these blocks allows corresponding block alignment.
+ #         - On average, `zeros_per_block` zeros per block are present in the sinc function.
+ #         """
+ #         zeros_per_block = min(input_sr, output_sr) * cutoff_ratio
+ #         """
+ #         - Define conv kernel size n = (blocks_per_side*2 + 1), adding blocks to each side of the center.
+ #         - `blocks_per_side` blocks as window radius ensures each central block sample accesses its window.
+ #         - `blocks_per_side` is determined, rounding up if needed, as 1 + int(filter_width / zeros_per_block).
+ #         """
+ #         blocks_per_side = int(np.ceil(filter_width / zeros_per_block))
+
+ #         kernel_width = 2 * blocks_per_side + 1
+
+ #         # Shape of conv1d weights: (out_channels, in_channels, kernel_width)
+ #         """ Time computations are in units of 1 block, aligning with the `canonical` time axis,
+ #         since each block has input_sr input samples, adhering to our time unit."""
+
+ #         window_radius_in_blocks = blocks_per_side
+ #         """`times` will be sinc function arguments, expanding to shape (output_sr, input_sr, kernel_width)
+ #         via broadcasting. Ensuring t == 0 along the central block diagonal (when input_sr == output_sr)"""
+ #         times = (
+ #             np.arange(output_sr, dtype=np_dtype).reshape(
+ #                 (output_sr, 1, 1)) / output_sr - np.arange(input_sr, dtype=np_dtype).reshape(
+ #                     (1, input_sr, 1)) / input_sr - (np.arange(kernel_width, dtype=np_dtype).reshape(
+ #                         (1, 1, kernel_width)) - blocks_per_side))
+
+ #         def hann_window(a):
+ #             """
+ #             returning 0.5 + 0.5 cos(a*pi) on [-1,1] and 0 outside.
+ #             """
+ #             return np.heaviside(1 - np.abs(a), 0.0) * (0.5 + 0.5 * np.cos(a * np.pi))
+
+ #         def kaiser_window(a, beta):
+ #             w = special.i0(beta * np.sqrt(np.clip(1 - (
+ #                 (a - 0.0) / 1.0)**2.0, 0.0, 1.0))) / special.i0(beta)
+ #             return np.heaviside(1 - np.abs(a), 0.0) * w
+
+ #         """The weights are computed as a sinc function times a Hann-window function, normalized by
+ #         `zeros_per_block` (sinc) and `input_sr` (input function) to maintain integral and magnitude."""
+ #         if filter == 'hann':
+ #             weights = (
+ #                 np.sinc(times * zeros_per_block) * hann_window(times / window_radius_in_blocks) *
+ #                 zeros_per_block / input_sr)
+ #         else:
+ #             weights = (
+ #                 np.sinc(times * zeros_per_block) *
+ #                 kaiser_window(times / window_radius_in_blocks, beta) * zeros_per_block / input_sr)
+
+ #         self.input_sr = input_sr
+ #         self.output_sr = output_sr
+ #         """If output_sr == 1, merge input_sr into kernel_width for weights (shape: output_sr, input_sr,
+ #         kernel_width) to optimize convolution speed and avoid extra reshaping."""
+
+ #         assert weights.shape == (output_sr, input_sr, kernel_width)
+ #         if output_sr == 1:
+ #             self.resample_type = 'integer_downsample'
+ #             self.padding = input_sr * blocks_per_side
+ #             weights = torch.tensor(weights, dtype=dtype, requires_grad=False)
+ #             weights = weights.transpose(1, 2).contiguous().view(1, 1, input_sr * kernel_width)
+
+ #         elif input_sr == 1:
+ #             # For conv_transpose, use weights as if input_sr and output_sr were swapped, simulating downsampling.
+ #             self.resample_type = 'integer_upsample'
+ #             self.padding = output_sr * blocks_per_side
+ #             weights = torch.tensor(weights, dtype=dtype, requires_grad=False)
+ #             weights = weights.flip(2).transpose(0,
+ #                                                 2).contiguous().view(1, 1, output_sr * kernel_width)
+ #         else:
+ #             self.resample_type = 'general'
+ #             self.reshaped = False
+ #             self.padding = blocks_per_side
+ #             weights = torch.tensor(weights, dtype=dtype, requires_grad=False)
+
+ #         self.weights = torch.nn.Parameter(weights, requires_grad=False)
+
+ #     @torch.no_grad()
+ #     def forward(self, x: torch.Tensor) -> torch.Tensor:
+ #         """
+ #         Parameters:
+ #           - x: torch.Tensor, with shape (minibatch_size, sequence_length), dtype should match the instance's dtype.
+
+ #         Returns:
+ #           - A torch.Tensor with shape (minibatch_size, (sequence_length//input_sr)*output_sr), dtype matching the input,
+ #             and content resampled.
+ #         """
+ #         if self.resample_type == 'trivial':
+ #             return x
+ #         elif self.resample_type == 'integer_downsample':
+ #             (minibatch_size, seq_len) = x.shape  # (B, in_C, L) with in_C == 1
+ #             x = x.unsqueeze(1)
+ #             x = torch.nn.functional.conv1d(
+ #                 x, self.weights, stride=self.input_sr, padding=self.padding)  # (B, out_C, L)
+ #             return x.squeeze(1)  # (B, L)
+
+ #         elif self.resample_type == 'integer_upsample':
+ #             x = x.unsqueeze(1)
+ #             x = torch.nn.functional.conv_transpose1d(
+ #                 x, self.weights, stride=self.output_sr, padding=self.padding)
+
+ #             return x.squeeze(1)
+ #         else:
+ #             assert self.resample_type == 'general'
+ #             (minibatch_size, seq_len) = x.shape
+ #             num_blocks = seq_len // self.input_sr
+ #             if num_blocks == 0:
+ #                 # TODO: pad with zeros.
+ #                 raise RuntimeError("Signal is too short to resample")
+ #             # Truncate input
+ #             x = x[:, 0:(num_blocks * self.input_sr)].view(minibatch_size, num_blocks, self.input_sr)
+ #             x = x.transpose(1, 2)  # (B, in_C, L)
+ #             x = torch.nn.functional.conv1d(
+ #                 x, self.weights, padding=self.padding)  # (B, out_C, num_blocks)
+ #             return x.transpose(1, 2).contiguous().view(minibatch_size, num_blocks * self.output_sr)
+
+ # def test_resampler_sinewave():
+ #     import torchaudio
+ #     # x: {440Hz, 220Hz} sine wave at 16kHz
+ #     t = torch.arange(0, 2, 1 / 16000)  # 2 seconds at 16kHz
+ #     x0 = torch.sin(2 * torch.pi * 440 * t) * 0.5
+ #     x1 = torch.sin(2 * torch.pi * 220 * t) * 0.5
+ #     x = torch.stack((x0, x1), dim=0)  # (2, 32000)
+
+ #     # Resample
+ #     resampler = Resampler(input_sr=16000, output_sr=12000)
+ #     y = resampler(x)  # (2, 24000)
+
+ #     # Export to wav
+ #     torchaudio.save("x.wav", x, 16000, bits_per_sample=16)
+ #     torchaudio.save("y.wav", y, 12000, bits_per_sample=16)
+
+ # def test_resampler_music():
+ #     import torchaudio
+ #     # x: music at 16kHz
+ #     x, _ = torchaudio.load("music.wav")
+ #     slice_length = 32000
+ #     n_slices = 80
+ #     slices = [x[0, i * slice_length:(i + 1) * slice_length] for i in range(n_slices)]
+ #     x = torch.stack(slices)  # (80, 32000)
+
+ #     # Resample
+ #     filter_width = 32
+ #     resampler = Resampler(16000, 12000, filter_width=filter_width)
+ #     y = resampler(x)  # (80, 24000)
+ #     y = y.reshape(1, -1)  # (1, 1920000)
+ #     torchaudio.save(f"y_filter_width{filter_width}.wav", y, 12000, bits_per_sample=16)
+
+ # class PitchShiftLayer(nn.Module):
+ #     """Applying batch-wise pitch-shift to time-domain audio signals.
+
+ #     Args:
+ #         expected_input_length (int): Expected input length. Default: ``32767``.
+ #         pshift_range (List[int]): Range of pitch shift in semitones. Default: ``[-2, 2]``.
+ #         min_gcd (int): Minimum GCD of input and output sampling rates for resampling. Setting high value can save GPU memory. Default: ``16``.
+ #         max_timing_error (float): Maximum allowed timing error in seconds. Default: ``0.002``.
+ #         fs (int): Sample rate of input waveform, x. Default: 16000.
+ #         bins_per_octave (int, optional): The number of steps per octave (Default : ``12``).
+ #         n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
+ #         win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
+ #         hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4``
+ #             is used (Default: ``None``).
+ #         window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window.
+ #             If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``).
+
+ #     """
+
+ #     def __init__(
+ #         self,
+ #         expected_input_length: int = 32767,
+ #         pshift_range: List[int] = [-2, 2],
+ #         min_gcd: int = 16,
+ #         max_timing_error: float = 0.002,
+ #         fs: int = 16000,
+ #         bins_per_octave: int = 12,
+ #         n_fft: int = 2048,
+ #         win_length: Optional[int] = None,
+ #         hop_length: Optional[int] = None,
+ #         window: Optional[torch.Tensor] = None,
+ #         filter_width: int = 16,
+ #         filter: Literal['kaiser', 'kaiser_best', 'kaiser_fast', 'hann'] = 'kaiser_fast',
+ #         cutoff_ratio: float = 0.85,
+ #         beta: float = 8.555504641634386,
+ #         **kwargs,
+ #     ):
+ #         super().__init__()
+ #         self.expected_input_length = expected_input_length
+ #         self.pshift_range = pshift_range
+ #         self.min_gcd = min_gcd
+ #         self.max_timing_error = max_timing_error
+ #         self.fs = fs
+ #         self.bins_per_octave = bins_per_octave
+ #         self.n_fft = n_fft
+ #         self.win_length = win_length
+ #         self.hop_length = hop_length
+ #         self.window = window
+ #         self.resample_args = {
+ #             "filter_width": filter_width,
+ #             "filter": filter,
+ #             "cutoff_ratio": cutoff_ratio,
+ #             "beta": beta,
+ #         }
+
+ #         # Initialize Resamplers
+ #         self._initialize_resamplers()
+
+ #     def _initialize_resamplers(self):
+ #         resamplers = nn.ModuleDict()
+ #         self.frame_gaps = {}  # for debugging
+ #         for i in range(self.pshift_range[0], self.pshift_range[1] + 1):
+ #             if i == 0:
+ #                 # No need to shift and resample
+ #                 resamplers[str(i)] = None
+ #             else:
+ #                 # Find optimal reconversion frames meeting the min_gcd
+ #                 stretched_frames, recon_frames, gap = self._find_optimal_reconversion_frames(i)
+ #                 self.frame_gaps[i] = gap
+ #                 resamplers[str(i)] = Resampler(stretched_frames, recon_frames, **self.resample_args)
+ #         self.resamplers = resamplers
+
+ #     def _find_optimal_reconversion_frames(self, semitone: int):
+ #         """
+ #         Find the optimal reconversion frames for a given source sample rate, input length, and semitone for stretch.
+
+ #         Parameters:
+ #           - sr (int): Input audio sample rate, which should be power of 2
+ #           - n_step (int): The number of pitch-shift steps in semi-tone.
+ #           - min_gcd (int): The minimum desired GCD, power of 2. Defaults to 16. 16 or 32 are good choices.
+ #           - max_timing_error (float): The maximum allowed timing error, in seconds. Defaults to 5 ms
+
+ #         Returns:
+ #           - int: The optimal target sample rate
+ #         """
+ #         stretch_rate = 1 / 2.0**(-float(semitone) / self.bins_per_octave)
+ #         stretched_frames = round(self.expected_input_length * stretch_rate)
+
+ #         gcd = math.gcd(self.expected_input_length, stretched_frames)
+ #         if gcd >= self.min_gcd:
+ #             return stretched_frames, self.expected_input_length, 0
+ #         else:
+ #             reconversion_frames = adjust_b_to_gcd(stretched_frames, self.expected_input_length,
+ #                                                   self.min_gcd)
+ #             gap = reconversion_frames - self.expected_input_length
+ #             gap_sec = gap / self.fs
+ #             if gap_sec > self.max_timing_error:
+ #                 # TODO: modifying vocoder of stretch_waveform to adjust pitch-shift rate in cents
+ #                 raise ValueError(
+ #                     gap_sec < self.max_timing_error,
+ #                     f"gap_sec={gap_sec} > max_timing_error={self.max_timing_error} with semitone={semitone}, stretched_frames={stretched_frames}, recon_frames={reconversion_frames}. Try adjusting input length or decreasing min_gcd."
+ #                 )
+ #             else:
+ #                 return stretched_frames, reconversion_frames, gap_sec
+
+ #     @torch.no_grad()
+ #     def forward(self,
+ #                 x: torch.Tensor,
+ #                 semitone: int,
+ #                 resample: bool = True,
+ #                 fix_shape: bool = True) -> torch.Tensor:
+ #         """
+ #         Args:
+ #             x (torch.Tensor): (B, 1, T)
+ #         Returns:
+ #             torch.Tensor: (B, 1, T)
+ #         """
+ #         if semitone == 0:
+ #             return x
+ #         elif semitone >= min(self.pshift_range) and semitone <= max(self.pshift_range):
+ #             x = x.squeeze(1)  # (B, T)
+ #             original_x_size = x.size()
+ #             x = _stretch_waveform(
+ #                 x,
+ #                 semitone,
+ #                 self.bins_per_octave,
+ #                 self.n_fft,
+ #                 self.win_length,
+ #                 self.hop_length,
+ #                 self.window,
+ #             )
+ #             if resample:
+ #                 x = self.resamplers[str(semitone)].forward(x)
+ #             # Fix waveform shape
+ #             if fix_shape:
+ #                 if x.size(1) != original_x_size[1]:
+ #                     # print(f"Warning: {x.size(1)} != {original_x_length}")
+ #                     x = _fix_waveform_shape(x, original_x_size)
+ #             return x.unsqueeze(1)  # (B, 1, T)
+ #         else:
+ #             raise ValueError(f"semitone must be in range {self.pshift_range}")
+
+ # def test_pitchshift_layer():
+ #     import torchaudio
+ #     # music
+ #     # x, _ = torchaudio.load("music.wav")
+ #     # slice_length = 32767
+ #     # n_slices = 80
+ #     # slices = [x[0, i * slice_length:(i + 1) * slice_length] for i in range(n_slices)]
+ #     # x = torch.stack(slices).unsqueeze(1)  # (80, 1, 32767)
+
+ #     # sine wave
+ #     t = torch.arange(0, 2.0479, 1 / 16000)  # 2.05 seconds at 16kHz
+ #     x = torch.sin(2 * torch.pi * 440 * t) * 0.5
+ #     x = x.reshape(1, 1, 32767).tile(80, 1, 1)
+
+ #     # Resample
+ #     pos = 0
+ #     ps = PitchShiftLayer(
+ #         pshift_range=[-3, 4],
+ #         expected_input_length=32767,
+ #         fs=16000,
+ #         min_gcd=16,
+ #         max_timing_error=0.002,
+ #         # filter_width=64,
+ #         filter='kaiser_fast',
+ #         n_fft=2048)
+ #     y = []
+ #     for i in range(-3, 4):
+ #         y.append(ps(x[[pos], :, :], i, resample=False, fix_shape=False)[0, 0, :])
+ #     y = torch.cat(y).unsqueeze(0)  # (1, 32767 * 7)
+ #     torchaudio.save("y_2048_kaiser_fast.wav", y, 16000, bits_per_sample=16)
+
+ #     # TorchAudio PitchShift for comparison
+ #     y_ta = []
+ #     for i in range(-3, 4):
+ #         ta_transform = torchaudio.transforms.PitchShift(16000, n_steps=i)
+ #         y_ta.append(ta_transform(x[[pos], :, :])[0, 0, :])
+ #     y_ta = torch.cat(y_ta).unsqueeze(0)  # (1, 32767 * 7)
+ #     torchaudio.save("y_ta.wav", y_ta, 16000, bits_per_sample=16)
+
+ # def test_min_gcd_mem_usage():
+ #     min_gcd = 16
+ #     for i in range(-3, 4):
+ #         stretched_frames = _stretch_waveform(x, i).shape[1]
+ #         adjusted = adjust_b_to_gcd(stretched_frames, 32767, min_gcd)
+ #         gcd_val = math.gcd(adjusted, stretched_frames)
+ #         gap = adjusted - 32767
+ #         gap_ms = (gap / 16000) * 1000
+ #         mem_mb = (stretched_frames / gcd_val) * (adjusted / gcd_val) * 3 * 4 / 1000 / 1000
+ #         print(f'\033[92mmin_gcd={min_gcd}\033[0m', f'ps={i}', f'frames={stretched_frames}',
+ #               f'adjusted_frames={adjusted}', f'gap={gap}', f'\033[91mgap_ms={gap_ms}\033[0m',
+ #               f'gcd={gcd_val}', f'mem_MB={mem_mb}')
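A minimal augmentation sketch using the active PitchShiftLayer above (the batch shape and the per-batch random-semitone policy are illustrative assumptions; resample_source_fs=4000 follows test_resampler_sinewave, and expected_input_shape triggers the DDP-friendly dummy initialization):

    import random
    import torch
    from model.pitchshift_layer import PitchShiftLayer

    psl = PitchShiftLayer(pshift_range=[-2, 2], resample_source_fs=4000,
                          expected_input_shape=(4, 32767))
    x = torch.randn(4, 32767)          # (B, T) waveform batch
    semitone = random.randint(-2, 2)   # one shared shift per batch
    y = psl(x, semitone)               # pitch-shifted waveform, same layout as x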
model/positional_encoding.py ADDED
@@ -0,0 +1,288 @@
+ """positional_encoding.py"""
+ from typing import Optional, Literal
+ from inspect import isfunction
+ from math import log, log2, pi, floor
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from torch import nn, einsum
+ from einops import rearrange, repeat
+
+ from model.RoPE.RoPE import RotaryEmbedding
+
+
+ class AlibiPositionalBias(nn.Module):
+     """
+     Alibi Positional Bias for Transformer Attention
+     : modified to support a trainable slope, similar to the "LittleBird" paper, based on
+     https://github.com/lucidrains/x-transformers/
+     https://github.com/ofirpress/attention_with_linear_biases/issues/5
+
+     This is an Alibi positional bias extension for:
+     - bi-directional self/cross attention
+     - supporting extrapolation.
+
+     References:
+         Press, Ofir, Noah A. Smith, and Mike Lewis. "Train short, test long: Attention with linear
+         biases enables input length extrapolation." arXiv preprint arXiv:2108.12409 (2021).
+
+         Lee, Minchul, Kijong Han, and Myeong Cheol Shin. "LittleBird: Efficient Faster & Longer
+         Transformer for Question Answering." arXiv preprint arXiv:2210.11870 (2022).
+     """
+
+     def __init__(self,
+                  heads: int = 8,
+                  total_heads: int = 8,
+                  trainable_slope: bool = False,
+                  trainable_slope_init: Literal['random', 'log'] = 'random',
+                  **kwargs) -> None:
+         super().__init__()
+         self.heads = heads  # number of heads to be activated
+         self.total_heads = total_heads  # number of heads in attention module
+         self.trainable_slope = trainable_slope
+         self.trainable_slope_init = trainable_slope_init
+
+         if trainable_slope:
+             self.slopes = nn.Parameter(torch.Tensor(heads, 1, 1), requires_grad=True)
+         else:
+             slopes = torch.Tensor(self._get_slopes(heads))
+             slopes = rearrange(slopes, 'h -> h 1 1')
+             self.register_buffer('slopes', slopes, persistent=False)
+
+         self.register_buffer('bias', None, persistent=False)
+
+     def reset_parameters(self) -> None:
+         if self.trainable_slope:
+             if self.trainable_slope_init == 'random':
+                 nn.init.normal_(self.slopes, -2, 1)
+             else:
+                 raise NotImplementedError(f'Unknown trainable_slope_init: {self.trainable_slope_init}')
+
+     def get_bias(self, i, j, device):
+         i_arange = torch.arange(j - i, j, device=device)
+         j_arange = torch.arange(j, device=device)
+         bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
+         return bias
+
+     @staticmethod
+     def _get_slopes(heads):
+
+         def get_slopes_power_of_2(n):
+             start = (2**(-2**-(log2(n) - 3)))
+             ratio = start
+             return [start * ratio**i for i in range(n)]
+
+         if log2(heads).is_integer():
+             return get_slopes_power_of_2(heads)
+
+         closest_power_of_2 = 2**floor(log2(heads))
+         return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(
+             2 * closest_power_of_2)[0::2][:heads - closest_power_of_2]
+
+     @staticmethod
+     def pad_at_dim(t, pad, dim=-1, value=0.):
+         dims_from_right = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
+         zeros = ((0, 0) * dims_from_right)
+         return F.pad(t, (*zeros, *pad), value=value)
+
+     @property
+     def device(self):
+         if self.trainable_slope:
+             return self.slopes.device
+         else:
+             return next(self.buffers()).device
+
+     def forward(self, i, j):
+         """
+         Args:
+             i (int): end index of query
+             j (int): end index of key
+
+         Returns:
+             torch.Tensor: (num_total_heads, i, j) positional bias for each head
+
+         Usage:
+             >>> alibi_bias = AlibiPositionalBias(heads=8, total_heads=8, trainable_slope=False)
+             >>> pos_bias = alibi_bias(len(q), len(k))
+             >>> q_dot_k = ...
+             >>> q_dot_k += pos_bias
+             >>> q_dot_k = q_dot_k.softmax(dim=-1)
+
+         """
+         h, device = self.total_heads, self.device
+         if self.trainable_slope:
+             if self.bias is not None and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
+                 bias = self.bias[..., :i, :j]
+             else:
+                 bias = self.get_bias(i, j, device)
+                 num_heads_unalibied = h - bias.shape[0]
+                 bias = self.pad_at_dim(bias, (0, num_heads_unalibied), dim=0)
+                 self.register_buffer('bias', bias, persistent=False)
+
+             return self.bias * torch.sigmoid(self.slopes)
+
+         else:
+             if self.bias is not None and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i:
+                 return self.bias[..., :i, :j]
+
+             bias = self.get_bias(i, j, device)
+             bias = bias * self.slopes
+
+             num_heads_unalibied = h - bias.shape[0]
+             bias = self.pad_at_dim(bias, (0, num_heads_unalibied), dim=0)
+             self.register_buffer('bias', bias, persistent=False)
+
+             return self.bias
+
+
+ class FixedSinusoidalPositionalEmbedding(nn.Embedding):
+     """
+     Sinusoidal Absolute Positional Embeddings (APE) of any length.
+
+     Adapted from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding
+
+     """
+
+     def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
+         super().__init__(num_positions, embedding_dim)
+         self.weight = self._init_weight(self.weight)
+
+     @staticmethod
+     def _init_weight(out: nn.Parameter):
+         """
+         Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
+         the 2nd half of the vector. [dim // 2:]
+         """
+         n_pos, dim = out.shape
+         position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)
+                                 ])
+         out.requires_grad = False  # set early to avoid an error in pytorch-1.8+
+         sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
+         out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
+         out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
+         out.detach_()
+         return out
+
+     @torch.no_grad()
+     def forward(self, seq_len: int, past_key_values_length: int = 0):
+         """`input_ids_shape` is expected to be [bsz x seqlen]."""
+         positions = torch.arange(past_key_values_length,
+                                  past_key_values_length + seq_len,
+                                  dtype=torch.long,
+                                  device=self.weight.device)
+         return super().forward(positions)
+
+
+ class Wav2Vec2ConformerRotaryPositionalEmbedding(nn.Module):
+     """Rotary positional embedding
+     Reference: https://blog.eleuther.ai/rotary-embeddings/ Paper: https://arxiv.org/pdf/2104.09864.pdf
+     """
+
+     def __init__(self, config):
+         super().__init__()
+         dim = config.d_model // config.num_heads
+         base = config.rotary_embedding_base
+
+         inv_freq = 1.0 / (base**(torch.arange(0, dim, 2).float() / dim))
+         self.register_buffer("inv_freq", inv_freq)
+         self.cached_sequence_length = None
+         self.cached_rotary_positional_embedding = None
+
+     def forward(self, hidden_states):
+         sequence_length = hidden_states.shape[1]
+
+         if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
+             return self.cached_rotary_positional_embedding
+
+         self.cached_sequence_length = sequence_length
+         time_stamps = torch.arange(sequence_length).type_as(self.inv_freq)
+         freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
+         embeddings = torch.cat((freqs, freqs), dim=-1)
+
+         cos_embeddings = embeddings.cos()[:, None, None, :]
+         sin_embeddings = embeddings.sin()[:, None, None, :]
+         self.cached_rotary_positional_embedding = torch.stack([cos_embeddings, sin_embeddings])
+         return self.cached_rotary_positional_embedding
+
+
+ class Wav2Vec2ConformerRelPositionalEmbedding(nn.Module):
+     """Relative positional encoding module."""
+
+     def __init__(self, config):
+         super().__init__()
+         self.max_len = config.num_max_positions
+         self.d_model = config.d_model
+         self.pe = None
+         self.extend_pe(torch.tensor(0.0).expand(1, self.max_len))
+
+     def extend_pe(self, x):
+         # Reset the positional encodings
+         if self.pe is not None:
+             # self.pe contains both positive and negative parts
+             # the length of self.pe is 2 * input_len - 1
+             if self.pe.size(1) >= x.size(1) * 2 - 1:
+                 if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                     self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                 return
+         # Suppose `i` is the position of query vector and `j` is the
+         # position of key vector. We use positive relative positions when keys
+         # are to the left (i>j) and negative relative positions otherwise (i<j).
+         pe_positive = torch.zeros(x.size(1), self.d_model)
+         pe_negative = torch.zeros(x.size(1), self.d_model)
+         position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, self.d_model, 2, dtype=torch.float32) * -(log(10000.0) / self.d_model))
+         pe_positive[:, 0::2] = torch.sin(position * div_term)
+         pe_positive[:, 1::2] = torch.cos(position * div_term)
+         pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+         pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+         # Reverse the order of positive indices and concat both positive and
+         # negative indices. This is used to support the shifting trick
+         # as in https://arxiv.org/abs/1901.02860
+         pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+         pe_negative = pe_negative[1:].unsqueeze(0)
+         pe = torch.cat([pe_positive, pe_negative], dim=1)
+         self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+     def forward(self, hidden_states: torch.Tensor):
+         self.extend_pe(hidden_states)
+         start_idx = self.pe.size(1) // 2 - hidden_states.size(1) + 1
+         end_idx = self.pe.size(1) // 2 + hidden_states.size(1)
+         relative_position_embeddings = self.pe[:, start_idx:end_idx]
+
+         return relative_position_embeddings
+
+
+ #================================================================================================
+ # Rotary Positional Embedding
+ #================================================================================================
+ def get_rotary_emb(d_by_head: int,
+                    freqs_for: Literal["l", "lang", "p", "pixel"],
+                    partial_pe: bool = False,
+                    learned_freq: bool = False):
+     if partial_pe is True:
+         rdim = d_by_head // 2
+     else:
+         rdim = d_by_head
+
+     if freqs_for in ["l", "lang"]:
+         freqs_for = "lang"
+     elif freqs_for in ["p", "pixel"]:
+         freqs_for = "pixel"
+     else:
+         raise ValueError(f"freqs_for must be 'l' or 'lang' or 'p' or 'pixel', but got {freqs_for}")
+     return RotaryEmbedding(dim=rdim, freqs_for=freqs_for, learned_freq=learned_freq)
+
+
+ def test_rotary_embedding_lang():
+     d = 128
+     num_heads = 8
+     d_by_head = d // num_heads
+
+     rotary = get_rotary_emb(d_by_head, freqs_for="lang", partial_pe=False, learned_freq=False)
+     q = torch.ones(1, 8, 110, d_by_head)
+     q = rotary.apply_rotary_custom(q)
+
+     import matplotlib.pyplot as plt
+     plt.imshow(q[0, 0, :, :].detach().numpy().T, origin='lower')
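The Usage block in AlibiPositionalBias.forward is pseudocode; a concrete minimal sketch of biasing attention scores (the batch size, head count, and sequence lengths are arbitrary):

    import torch
    from model.positional_encoding import AlibiPositionalBias

    alibi = AlibiPositionalBias(heads=8, total_heads=8, trainable_slope=False)
    scores = torch.randn(2, 8, 110, 110)  # (B, H, T_q, T_k) raw q.k scores
    scores = scores + alibi(110, 110)     # (H, T_q, T_k) bias broadcasts over the batch dim
    attn = scores.softmax(dim=-1)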
model/spectrogram.py ADDED
@@ -0,0 +1,225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """spectrogram.py"""
11
+ import importlib
12
+ from typing import Optional, Literal, Dict, Tuple
13
+ from packaging.version import parse as VersionParse
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ from einops import rearrange
18
+ from model.ops import minmax_normalize
19
+ from config.config import audio_cfg as default_audio_cfg
20
+ """
21
+ Example usage:
22
+
23
+ # MT3 setup
24
+ >>> hop = 8 ms or 128 samples
25
+ >>> melspec = Melspectrogram(sample_rate=16000, n_fft=2048, hop_length=128,
26
+ f_min=50, f_max=8000, n_mels=512)
27
+ >>> x = torch.randn(2, 1, 32767) # (B, C=1, T): 2.048 s
28
+ >>> y = melspec(x) # (2, 256, 512) (B, T, F)
29
+
30
+ # PerceiverTF-like setup
31
+ >>> hop = 18.75 ms or 300 samples
32
+ >>> spec = Spectrogram(n_fft=2048, hop_length=300)
33
+
34
+ >>> x = torch.randn(2, 1, 95999) # (B, C=1, T): 6.000 s
35
+ >>> y = spec(x) # (2, 320, 1024) (B, T, F)
36
+
37
+ # Hybrid setup (2.048 seconds segment and spectrogram with hop=300)
38
+ >>> hop = 18.75 ms or 300 samples
39
+ >>> spec = Spectrogram(n_fft=2048, hop_length=300)
40
+ >>> x = torch.randn(2, 1, 32767) # (B, C=1, T): 2.048 s
41
+ >>> y = spec(x) # (2, 110, 1024) (B, T, F)
42
+
43
+ # PerceiverTF-like setup, hop=256
44
+ >>> hop = 16 ms or 256 samples
45
+ >>> spec256 = Spectrogram(sample_rate=16000, n_fft=2048, hop_length=256,
46
+ f_min=20, f_max=8000, n_mels=256)
47
+ >>> x = torch.randn(2, 1, 32767) # (B, C=1, T): 2.048 s
48
+ >>> y = spec256(x) # (2, 128, 1024) (B, T, F)
49
+ """
50
+
51
+
52
+ def optional_compiler_disable(func):
53
+ if VersionParse(torch.__version__) >= VersionParse("2.1"):
54
+ # If the version is 2.1 or higher, apply the torch.compiler.disable decorator.
55
+ return torch.compiler.disable(func)
56
+ else:
57
+ # If the version is below 2.1, return the original function.
58
+ return func
59
+
60
+
61
+ # -------------------------------------------------------------------------------------
62
+ # Log-Mel spectrogram
63
+ # -------------------------------------------------------------------------------------
64
+ class Melspectrogram(nn.Module):
65
+
66
+ def __init__(
67
+ self,
68
+ audio_backend: Literal['torchaudio', 'nnaudio'] = 'torchaudio',
69
+ sample_rate: int = 16000,
70
+ n_fft: int = 2048,
71
+ hop_length: int = 128,
72
+ f_min: int = 50, # 20 Hz in the MT3 paper; the nnAudio backend below is fixed to 20 Hz
73
+ f_max: Optional[int] = 8000,
74
+ n_mels: int = 512,
75
+ eps: float = 1e-5,
76
+ **kwargs,
77
+ ):
78
+ """
79
+ Log-Melspectrogram
80
+
81
+ Args:
82
+ audio_backend (str): 'torchaudio' or 'nnaudio'
83
+ sample_rate (int): sample rate in Hz
84
+ n_fft (int): FFT window size
85
+ hop_length (int): hop length in samples
86
+ f_min (int): minimum frequency in Hz
87
+ f_max (int): maximum frequency in Hz
88
+ n_mels (int): number of mel frequency bins
89
+ eps (float): epsilon for numerical stability
90
+
91
+ """
92
+ super(Melspectrogram, self).__init__()
93
+ self.audio_backend = audio_backend.lower()
94
+
95
+ if audio_backend.lower() == 'torchaudio':
96
+ torchaudio = importlib.import_module('torchaudio')
97
+ self.mel_stft = torchaudio.transforms.MelSpectrogram(
98
+ sample_rate=sample_rate,
99
+ n_fft=n_fft,
100
+ hop_length=hop_length,
101
+ f_min=f_min,
102
+ f_max=f_max,
103
+ n_mels=n_mels,
104
+ )
105
+ elif audio_backend.lower() == 'nnaudio':
106
+ nnaudio = importlib.import_module('nnAudio.features')
107
+ self.mel_stft_nnaudio = nnaudio.mel.MelSpectrogram(
108
+ sr=sample_rate,
109
+ win_length=n_fft,
110
+ n_mels=n_mels,
111
+ hop_length=hop_length,
112
+ fmin=20, # fixed to 20 Hz for nnAudio, ignoring f_min (see note above)
113
+ fmax=f_max)
114
+ else:
115
+ raise NotImplementedError(audio_backend)
116
+ self.eps = eps
117
+
118
+ @optional_compiler_disable
119
+ def forward(self, x: torch.Tensor) -> torch.Tensor: # (B, 1, T)
120
+ """
121
+ Args:
122
+ x (torch.Tensor): (B, 1, T)
123
+
124
+ Returns:
125
+ torch.Tensor: (B, T, F)
126
+
127
+ """
128
+ if self.audio_backend == 'torchaudio':
129
+ x = self.mel_stft(x) # (B, 1, F, T)
130
+ x = rearrange(x, 'b 1 f t -> b t f')
131
+ x = minmax_normalize(torch.log(x + self.eps))
132
+ # some versions of torchaudio return NaN for an all-zero input
133
+ return torch.nan_to_num(x)
134
+
135
+ elif self.audio_backend == 'nnaudio':
136
+ x = self.mel_stft_nnaudio(x) # (B, F, T)
137
+ x = rearrange(x, 'b f t -> b t f')
138
+ x = minmax_normalize(torch.log(x + self.eps))
139
+ return x
140
+
141
+
142
+ # -------------------------------------------------------------------------------------
143
+ # Log-spectrogram
144
+ # -------------------------------------------------------------------------------------
145
+ class Spectrogram(nn.Module):
146
+
147
+ def __init__(
148
+ self,
149
+ audio_backend: Literal['torchaudio', 'nnaudio'] = 'torchaudio',
150
+ n_fft: int = 2048,
151
+ hop_length: int = 128,
152
+ eps: float = 1e-5,
153
+ **kwargs,
154
+ ):
155
+ """
156
+ Log-Magnitude Spectrogram
157
+
158
+ Args:
159
+ audio_backend (str): 'torchaudio' or 'nnaudio'
160
+ n_fft (int): FFT window size, creates n_fft // 2 + 1 freq-bins
161
+ hop_length (int): hop length in samples
162
+ eps (float): epsilon for numerical stability
163
+
164
+ """
165
+ super(Spectrogram, self).__init__()
166
+ self.audio_backend = audio_backend.lower()
167
+
168
+ if audio_backend.lower() == 'torchaudio':
169
+ torchaudio = importlib.import_module('torchaudio')
170
+ self.stft = torchaudio.transforms.Spectrogram(n_fft=n_fft,
171
+ hop_length=hop_length,
172
+ window_fn=torch.hann_window,
173
+ power=1.) # magnitude spectrogram (B, 1, F, T); the DC bin is removed in forward()
174
+ elif audio_backend.lower() == 'nnaudio':
175
+ # TODO: nnAudio spectrogram
176
+ raise NotImplementedError(audio_backend)
177
+ else:
178
+ raise NotImplementedError(audio_backend)
179
+ self.eps = eps
180
+
181
+ @optional_compiler_disable
182
+ def forward(self, x: torch.Tensor) -> torch.Tensor: # (B, 1, T)
183
+ """
184
+ Args:
185
+ x (torch.Tensor): (B, 1, T)
186
+
187
+ Returns:
188
+ torch.Tensor: (B, T, F)
189
+
190
+ """
191
+ if self.audio_backend == 'torchaudio':
192
+ x = self.stft(x)[:, :, 1:, :] # (B, 1, F, T) remove DC component
193
+ x = rearrange(x, 'b 1 f t -> b t f')
194
+ x = minmax_normalize(torch.log(x + self.eps))
195
+ return torch.nan_to_num(x) # some versions of torchaudio return NaN for an all-zero input
196
+ elif self.audio_backend == 'nnaudio':
197
+ raise NotImplementedError(self.audio_backend)
198
+
199
+
200
+ def get_spectrogram_layer_from_audio_cfg(audio_cfg: Optional[Dict] = None) -> Tuple[nn.Module, Tuple[int]]:
201
+ """Get mel-/spectrogram layer from config.
202
+ - Used by 'ymt3' to create a spectrogram layer.
203
+ - Returns output shape of spectrogram layer, which is used to determine input shape of model.
204
+
205
+ Args:
206
+ audio_cfg (dict): see config/config.py
207
+
208
+ Returns:
209
+ layer (nn.Module): mel-/spectrogram layer
210
+ output_shape (tuple): inferred output shape of layer excluding batch dim. (T, F)
211
+ """
212
+ if audio_cfg is None:
213
+ audio_cfg = default_audio_cfg
214
+
215
+ if audio_cfg['codec'] == 'melspec':
216
+ layer = Melspectrogram(**audio_cfg)
217
+ elif audio_cfg['codec'] == 'spec':
218
+ layer = Spectrogram(**audio_cfg)
219
+ else:
220
+ raise NotImplementedError(audio_cfg['codec'])
221
+
222
+ # Infer output shape of the spectrogram layer
223
+ with torch.no_grad():
224
+ output_shape = layer(torch.randn(1, 1, audio_cfg['input_frames'])).shape[1:]
225
+ return layer, output_shape
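226
+
227
+
228
+ # Annotation (not in the original file): a usage sketch for the factory above. The
229
+ # dict below is illustrative and mirrors the audio_cfg keys consumed in this file;
230
+ # it is not the project's default config.
231
+ # >>> cfg = {'codec': 'melspec', 'input_frames': 32767, 'sample_rate': 16000,
232
+ # ...        'n_fft': 2048, 'hop_length': 128, 'f_min': 50, 'f_max': 8000, 'n_mels': 512}
233
+ # >>> layer, (t, f) = get_spectrogram_layer_from_audio_cfg(cfg)
234
+ # >>> layer(torch.randn(4, 1, cfg['input_frames'])).shape  # (4, t, f) = (4, 256, 512)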
model/t5mod.py ADDED
@@ -0,0 +1,687 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ # ==============================================================================
11
+ # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
12
+ #
13
+ # Licensed under the Apache License, Version 2.0 (the "License");
14
+ # you may not use this file except in compliance with the License.
15
+ # You may obtain a copy of the License at
16
+ #
17
+ # http://www.apache.org/licenses/LICENSE-2.0
18
+ #
19
+ # Unless required by applicable law or agreed to in writing, software
20
+ # distributed under the License is distributed on an "AS IS" BASIS,
21
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22
+ # See the License for the specific language governing permissions and
23
+ # limitations under the License.
24
+ import copy
25
+ from typing import Optional, Tuple, Union, Dict
26
+ from einops import rearrange
27
+ from model.ops import count_parameters
28
+
29
+ import torch
30
+ from torch import nn
31
+ from torch.utils.checkpoint import checkpoint
32
+ from transformers.utils import logging
33
+ from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
34
+ from transformers.models.t5.modeling_t5 import (T5LayerNorm, T5LayerSelfAttention, T5LayerCrossAttention, T5LayerFF)
35
+ from transformers.modeling_outputs import (BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions)
36
+ from transformers import T5Config, T5PreTrainedModel
37
+ from model.positional_encoding import FixedSinusoidalPositionalEmbedding
38
+ from model.ff_layer import get_ff_layer
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ class T5BlockYMT3(nn.Module):
44
+ """T5 Block, modified to allow using different types of FF layers."""
45
+
46
+ def __init__(self, config, has_relative_attention_bias=False):
47
+ super().__init__()
48
+ self.is_decoder = config.is_decoder
49
+ self.layer = nn.ModuleList()
50
+ self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
51
+ if self.is_decoder:
52
+ self.layer.append(T5LayerCrossAttention(config))
53
+
54
+ # FF layer
55
+ if config.ff_layer_type == 't5_gmlp':
56
+ self.layer.append(T5LayerFF(config))
57
+ elif config.ff_layer_type == 'moe':
58
+ config.moe_num_experts = 8 # hard-coded MoE hyperparameters for this block
59
+ config.moe_topk = 2
60
+ config.hidden_act = 'silu'
61
+ moe = get_ff_layer(config, input_size=config.d_model, widening_factor=config.ff_widening_factor)
62
+ self.layer.append(moe)
63
+ else:
64
+ raise ValueError(f"Unknown FF layer type: {config.ff_layer_type}.")
65
+ self.ff_layer_type = config.ff_layer_type
66
+
67
+ def forward(
68
+ self,
69
+ hidden_states,
70
+ attention_mask=None,
71
+ position_bias=None,
72
+ encoder_hidden_states=None,
73
+ encoder_attention_mask=None,
74
+ encoder_decoder_position_bias=None,
75
+ layer_head_mask=None,
76
+ cross_attn_layer_head_mask=None,
77
+ past_key_value=None,
78
+ use_cache=False,
79
+ output_attentions=False,
80
+ return_dict=True,
81
+ ):
82
+ if past_key_value is not None:
83
+ if not self.is_decoder:
84
+ logger.warning("`past_key_values` is passed to the encoder. Please make sure this is intended.")
85
+ expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
86
+
87
+ if len(past_key_value) != expected_num_past_key_values:
88
+ raise ValueError(
89
+ f"There should be {expected_num_past_key_values} past states. "
90
+ f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}"
91
+ f"Got {len(past_key_value)} past key / value states")
92
+
93
+ self_attn_past_key_value = past_key_value[:2]
94
+ cross_attn_past_key_value = past_key_value[2:]
95
+ else:
96
+ self_attn_past_key_value, cross_attn_past_key_value = None, None
97
+
98
+ self_attention_outputs = self.layer[0](
99
+ hidden_states,
100
+ attention_mask=attention_mask,
101
+ position_bias=position_bias,
102
+ layer_head_mask=layer_head_mask,
103
+ past_key_value=self_attn_past_key_value,
104
+ use_cache=use_cache,
105
+ output_attentions=output_attentions,
106
+ )
107
+ hidden_states, present_key_value_state = self_attention_outputs[:2]
108
+ attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
109
+
110
+ # clamp inf values to enable fp16 training
111
+ if hidden_states.dtype == torch.float16:
112
+ clamp_value = torch.where(
113
+ torch.isinf(hidden_states).any(),
114
+ torch.finfo(hidden_states.dtype).max - 1000,
115
+ torch.finfo(hidden_states.dtype).max,
116
+ )
117
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
118
+
119
+ do_cross_attention = self.is_decoder and encoder_hidden_states is not None
120
+ if do_cross_attention:
121
+ # the actual query length is unknown for cross attention
122
+ # if using past key value states. Need to inject it here
123
+ if present_key_value_state is not None:
124
+ query_length = present_key_value_state[0].shape[2]
125
+ else:
126
+ query_length = None
127
+
128
+ cross_attention_outputs = self.layer[1](
129
+ hidden_states,
130
+ key_value_states=encoder_hidden_states,
131
+ attention_mask=encoder_attention_mask,
132
+ position_bias=encoder_decoder_position_bias,
133
+ layer_head_mask=cross_attn_layer_head_mask,
134
+ past_key_value=cross_attn_past_key_value,
135
+ query_length=query_length,
136
+ use_cache=use_cache,
137
+ output_attentions=output_attentions,
138
+ )
139
+ hidden_states = cross_attention_outputs[0]
140
+
141
+ # clamp inf values to enable fp16 training
142
+ if hidden_states.dtype == torch.float16:
143
+ clamp_value = torch.where(
144
+ torch.isinf(hidden_states).any(),
145
+ torch.finfo(hidden_states.dtype).max - 1000,
146
+ torch.finfo(hidden_states.dtype).max,
147
+ )
148
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
149
+
150
+ # Combine self attn and cross attn key value states
151
+ if present_key_value_state is not None:
152
+ present_key_value_state = present_key_value_state + cross_attention_outputs[1]
153
+
154
+ # Keep cross-attention outputs and relative position weights
155
+ attention_outputs = attention_outputs + cross_attention_outputs[2:]
156
+
157
+ # Apply Feed Forward layer - Modified for MoE
158
+ if self.ff_layer_type == 't5_gmlp':
159
+ hidden_states = self.layer[-1](hidden_states)
160
+ elif self.ff_layer_type == 'moe':
161
+ hidden_states = hidden_states + self.layer[-1](hidden_states)[0] # residual connection outside the MoE
162
+ else:
163
+ raise ValueError(f"Unknown FF layer type: {self.ff_layer_type}.")
164
+
165
+ # clamp inf values to enable fp16 training
166
+ if hidden_states.dtype == torch.float16:
167
+ clamp_value = torch.where(
168
+ torch.isinf(hidden_states).any(),
169
+ torch.finfo(hidden_states.dtype).max - 1000,
170
+ torch.finfo(hidden_states.dtype).max,
171
+ )
172
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
173
+
174
+ outputs = (hidden_states,)
175
+
176
+ if use_cache:
177
+ outputs = outputs + (present_key_value_state,) + attention_outputs
178
+ else:
179
+ outputs = outputs + attention_outputs
180
+
181
+ return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
182
+
183
+
184
+ class T5StackYMT3(T5PreTrainedModel):
185
+ """
186
+ T5Stack, modified for YMT3 with:
187
+ - absolute sinusoidal absolute positional encoding
188
+ """
189
+
190
+ def __init__(
191
+ self,
192
+ config,
193
+ ):
194
+ super().__init__(config)
195
+ self.is_decoder = config.is_decoder
196
+
197
+ # Positional encoding (modified)
198
+ self.use_t5_trainable_pe = False
199
+ self.additive_pe = None
200
+
201
+ pos_enc_type = getattr(config, 'position_encoding_type', 'sinusoidal')
202
+ if pos_enc_type in ['sinusoidal']:
203
+ self.additive_pe = FixedSinusoidalPositionalEmbedding(config.num_max_positions,
204
+ embedding_dim=config.d_model)
205
+ self.block = nn.ModuleList(
206
+ [T5BlockYMT3(config, has_relative_attention_bias=False) for i in range(config.num_layers)])
207
+ elif pos_enc_type == 'trainable':
208
+ self.use_t5_trainable_pe = True
209
+ # Stack blocks
210
+ self.block = nn.ModuleList(
211
+ [T5BlockYMT3(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)])
212
+
213
+ self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
214
+ self.dropout = nn.Dropout(config.dropout_rate)
215
+
216
+ # Initialize weights and apply final processing
217
+ self.post_init()
218
+ # Model parallel
219
+ self.gradient_checkpointing = False
220
+
221
+ def forward(
222
+ self,
223
+ # input_ids=None,
224
+ inputs_embeds=None,
225
+ attention_mask=None,
226
+ encoder_hidden_states=None,
227
+ encoder_attention_mask=None,
228
+ head_mask=None,
229
+ cross_attn_head_mask=None,
230
+ past_key_values=None,
231
+ use_cache=None,
232
+ output_attentions=None,
233
+ output_hidden_states=None,
234
+ return_dict=None,
235
+ ):
236
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
237
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
238
+ output_hidden_states = (output_hidden_states
239
+ if output_hidden_states is not None else self.config.output_hidden_states)
240
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
241
+
242
+ if inputs_embeds is not None:
243
+ input_shape = inputs_embeds.size()[:-1]
244
+ else:
245
+ err_msg_prefix = "decoder_" if self.is_decoder else ""
246
+ raise ValueError(f"You have to specify {err_msg_prefix}inputs_embeds")
247
+
248
+ batch_size, seq_length = input_shape
249
+
250
+ # required mask seq length can be calculated via length of past
251
+ mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
252
+
253
+ # mod: required for additive PE
254
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
255
+
256
+ if use_cache is True:
257
+ assert self.is_decoder, f"`use_cache` can only be set to `True` if {self} is used as a decoder"
258
+
259
+ if attention_mask is None:
260
+ attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
261
+ if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
262
+ encoder_seq_length = encoder_hidden_states.shape[1]
263
+ encoder_attention_mask = torch.ones(batch_size,
264
+ encoder_seq_length,
265
+ device=inputs_embeds.device,
266
+ dtype=torch.long)
267
+
268
+ # initialize past_key_values with `None` if past does not exist
269
+ if past_key_values is None:
270
+ past_key_values = [None] * len(self.block)
271
+
272
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
273
+ # ourselves in which case we just need to make it broadcastable to all heads.
274
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
275
+
276
+ # If a 2D or 3D attention mask is provided for the cross-attention
277
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
278
+ if self.is_decoder and encoder_hidden_states is not None:
279
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
280
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
281
+ if encoder_attention_mask is None:
282
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
283
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
284
+ else:
285
+ encoder_extended_attention_mask = None
286
+
287
+ if self.gradient_checkpointing and self.training:
288
+ if use_cache:
289
+ logger.warning_once(
290
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
291
+ use_cache = False
292
+
293
+ # Prepare head mask if needed
294
+ head_mask = self.get_head_mask(head_mask, self.config.num_layers)
295
+ cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)
296
+ present_key_value_states = () if use_cache else None
297
+ all_hidden_states = () if output_hidden_states else None
298
+ all_attentions = () if output_attentions else None
299
+ all_cross_attentions = () if (output_attentions and self.is_decoder) else None
300
+ position_bias = None
301
+ encoder_decoder_position_bias = None
302
+
303
+ # mod: additive absolute PE (sinusoidal)
304
+ if self.additive_pe is not None:
305
+ inputs_embeds = inputs_embeds + self.additive_pe(inputs_embeds.shape[1], past_key_values_length)
306
+ else:
307
+ pass # trainable PE is implemented in T5Block
308
+
309
+ hidden_states = self.dropout(inputs_embeds)
310
+
311
+ for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
312
+ layer_head_mask = head_mask[i]
313
+ cross_attn_layer_head_mask = cross_attn_head_mask[i]
314
+
315
+ if output_hidden_states:
316
+ all_hidden_states = all_hidden_states + (hidden_states,)
317
+
318
+ if self.gradient_checkpointing and self.training:
319
+
320
+ def create_custom_forward(module):
321
+
322
+ def custom_forward(*inputs):
323
+ return tuple(module(*inputs, use_cache, output_attentions))
324
+
325
+ return custom_forward
326
+
327
+ layer_outputs = checkpoint(
328
+ create_custom_forward(layer_module),
329
+ hidden_states,
330
+ extended_attention_mask,
331
+ position_bias,
332
+ encoder_hidden_states,
333
+ encoder_extended_attention_mask,
334
+ encoder_decoder_position_bias,
335
+ layer_head_mask,
336
+ cross_attn_layer_head_mask,
337
+ None, # past_key_value is always None with gradient checkpointing
338
+ )
339
+ else:
340
+ layer_outputs = layer_module(
341
+ hidden_states,
342
+ attention_mask=extended_attention_mask,
343
+ position_bias=position_bias,
344
+ encoder_hidden_states=encoder_hidden_states,
345
+ encoder_attention_mask=encoder_extended_attention_mask,
346
+ encoder_decoder_position_bias=encoder_decoder_position_bias,
347
+ layer_head_mask=layer_head_mask,
348
+ cross_attn_layer_head_mask=cross_attn_layer_head_mask,
349
+ past_key_value=past_key_value,
350
+ use_cache=use_cache,
351
+ output_attentions=output_attentions,
352
+ )
353
+
354
+ # layer_outputs is a tuple with:
355
+ # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)
356
+ if use_cache is False:
357
+ layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
358
+
359
+ hidden_states, present_key_value_state = layer_outputs[:2]
360
+
361
+ # We share the position biases between the layers - the first layer stores them
362
+ # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
363
+ # (cross-attention position bias), (cross-attention weights)
364
+ position_bias = layer_outputs[2]
365
+ if self.is_decoder and encoder_hidden_states is not None:
366
+ encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
367
+ # append next layer key value states
368
+ if use_cache:
369
+ present_key_value_states = present_key_value_states + (present_key_value_state,)
370
+
371
+ if output_attentions:
372
+ all_attentions = all_attentions + (layer_outputs[3],)
373
+ if self.is_decoder:
374
+ all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
375
+
376
+ hidden_states = self.final_layer_norm(hidden_states)
377
+ hidden_states = self.dropout(hidden_states)
378
+
379
+ # Add last layer
380
+ if output_hidden_states:
381
+ all_hidden_states = all_hidden_states + (hidden_states,)
382
+
383
+ if not return_dict:
384
+ return tuple(v for v in [
385
+ hidden_states,
386
+ present_key_value_states,
387
+ all_hidden_states,
388
+ all_attentions,
389
+ all_cross_attentions,
390
+ ] if v is not None)
391
+ return BaseModelOutputWithPastAndCrossAttentions(
392
+ last_hidden_state=hidden_states,
393
+ past_key_values=present_key_value_states,
394
+ hidden_states=all_hidden_states,
395
+ attentions=all_attentions,
396
+ cross_attentions=all_cross_attentions,
397
+ )
398
+
399
+
400
+ class T5EncoderYMT3(T5PreTrainedModel):
401
+ # _keys_to_ignore_on_load_missing = [r"encoder.embed_tokens.weight"]
402
+
403
+ def __init__(self, encoder_config: Optional[Dict] = None, config: Optional[T5Config] = None):
404
+ if config is None:
405
+ config = T5Config()
406
+ if encoder_config is not None:
407
+ config = copy.deepcopy(config)
408
+ config.update(encoder_config)
409
+
410
+ if hasattr(config, "ff_widening_factor"):
411
+ config.d_ff = int(config.d_model) * int(config.ff_widening_factor)
412
+
413
+ config.is_decoder = False
414
+ config.use_cache = False
415
+ config.is_encoder_decoder = False
416
+
417
+ super().__init__(config)
418
+ self.model_dim = config.d_model
419
+
420
+ self.encoder = T5StackYMT3(config)
421
+
422
+ # Initialize weights and apply final processing
423
+ self.post_init()
424
+
425
+ """temporary fix for torch.compile issue"""
426
+
427
+ def forward(self, **kwargs):
428
+ if self.training is True:
429
+ return self._forward_compile(**kwargs)
430
+ else:
431
+ return self._forward_no_compile(**kwargs)
432
+
433
+ def _forward_no_compile(self, **kwargs):
434
+ return self._forward(**kwargs)
435
+
436
+ @torch.compile
437
+ def _forward_compile(self, **kwargs):
438
+ return self._forward(**kwargs)
439
+
440
+ def _forward(
441
+ self,
442
+ attention_mask: Optional[torch.FloatTensor] = None,
443
+ head_mask: Optional[torch.FloatTensor] = None,
444
+ inputs_embeds: Optional[torch.FloatTensor] = None,
445
+ output_attentions: Optional[bool] = None,
446
+ output_hidden_states: Optional[bool] = None,
447
+ return_dict: Optional[bool] = None,
448
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:
449
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
450
+
451
+ # Encode
452
+ encoder_outputs = self.encoder(
453
+ inputs_embeds=inputs_embeds,
454
+ attention_mask=attention_mask,
455
+ head_mask=head_mask,
456
+ output_attentions=output_attentions,
457
+ output_hidden_states=output_hidden_states,
458
+ return_dict=return_dict,
459
+ )
460
+
461
+ if not return_dict:
462
+ return encoder_outputs
463
+ else:
464
+ return BaseModelOutput(
465
+ last_hidden_state=encoder_outputs[0],
466
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
467
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
468
+ )
469
+
470
+
471
+ class T5DecoderYMT3(T5PreTrainedModel):
472
+
473
+ def __init__(self, decoder_config: Optional[Dict] = None, config: Optional[T5Config] = None):
474
+ if config is None:
475
+ config = T5Config()
476
+ if decoder_config is not None:
477
+ config = copy.deepcopy(config)
478
+ config.update(decoder_config)
479
+
480
+ if hasattr(config, "ff_widening_factor"):
481
+ config.d_ff = int(config.d_model) * int(config.ff_widening_factor)
482
+
483
+ config.is_decoder = True
484
+ config.is_encoder_decoder = False
485
+
486
+ super().__init__(config)
487
+ self.model_dim = config.d_model
488
+
489
+ self.decoder = T5StackYMT3(config)
490
+
491
+ # Initialize weights and apply final processing
492
+ self.post_init()
493
+
494
+ """temporary fix for torch.compile issue"""
495
+
496
+ def forward(self, **kwargs):
497
+ if self.training is True:
498
+ return self._forward_compile(**kwargs)
499
+ else:
500
+ return self._forward_no_compile(**kwargs)
501
+
502
+ def _forward_no_compile(self, **kwargs):
503
+ return self._forward(**kwargs)
504
+
505
+ @torch.compile
506
+ def _forward_compile(self, **kwargs):
507
+ return self._forward(**kwargs)
508
+
509
+ def _forward(
510
+ self,
511
+ # input_ids: torch.LongTensor, # removed since embed_tokens is outside the decoder
512
+ inputs_embeds: Optional[torch.FloatTensor] = None,
513
+ attention_mask: Optional[torch.FloatTensor] = None, # decoder_attention_mask
514
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
515
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
516
+ head_mask: Optional[torch.FloatTensor] = None,
517
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
518
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
519
+ use_cache: Optional[bool] = None,
520
+ output_attentions: Optional[bool] = None,
521
+ output_hidden_states: Optional[bool] = None,
522
+ return_dict: Optional[bool] = None,
523
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPastAndCrossAttentions]:
524
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
525
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
526
+
527
+ if isinstance(encoder_hidden_states, BaseModelOutput):
528
+ encoder_hidden_states = encoder_hidden_states.last_hidden_state
529
+
530
+ # Decode
531
+ decoder_outputs = self.decoder(
532
+ inputs_embeds=inputs_embeds,
533
+ attention_mask=attention_mask,
534
+ past_key_values=past_key_values,
535
+ encoder_hidden_states=encoder_hidden_states,
536
+ encoder_attention_mask=encoder_attention_mask,
537
+ head_mask=head_mask,
538
+ cross_attn_head_mask=cross_attn_head_mask,
539
+ use_cache=use_cache,
540
+ output_attentions=output_attentions,
541
+ output_hidden_states=output_hidden_states,
542
+ return_dict=return_dict,
543
+ )
544
+
545
+ if not return_dict:
546
+ return decoder_outputs
547
+ else:
548
+ return BaseModelOutputWithPastAndCrossAttentions(
549
+ last_hidden_state=decoder_outputs[0],
550
+ past_key_values=decoder_outputs[1],
551
+ hidden_states=decoder_outputs[2] if len(decoder_outputs) > 2 else None,
552
+ attentions=decoder_outputs[3] if len(decoder_outputs) > 3 else None,
553
+ cross_attentions=decoder_outputs[4] if len(decoder_outputs) > 4 else None,
554
+ )
555
+
556
+
557
+ class MultiChannelT5Decoder(T5PreTrainedModel):
558
+
559
+ def __init__(self, decoder_config: Optional[Dict] = None, config: Optional[T5Config] = None):
560
+ if config is None:
561
+ config = T5Config()
562
+ if decoder_config is not None:
563
+ config = copy.deepcopy(config)
564
+ config.update(decoder_config)
565
+
566
+ if hasattr(config, "ff_widening_factor"):
567
+ config.d_ff = int(config.d_model) * int(config.ff_widening_factor)
568
+
569
+ config.is_decoder = True
570
+ config.is_encoder_decoder = False
571
+
572
+ super().__init__(config)
573
+ self.model_dim = config.d_model
574
+ self.decoder = T5StackYMT3(config)
575
+
576
+ # Multi-channel parameters
577
+ self.num_channels = config.num_channels
578
+
579
+ # Initialize weights and apply final processing
580
+ self.post_init()
581
+
582
+ """temporary fix for torch.compile issue"""
583
+
584
+ def forward(self, **kwargs):
585
+ if self.training is True:
586
+ return self._forward_compile(**kwargs)
587
+ else:
588
+ return self._forward_no_compile(**kwargs)
589
+
590
+ def _forward_no_compile(self, **kwargs):
591
+ return self._forward(**kwargs)
592
+
593
+ @torch.compile
594
+ def _forward_compile(self, **kwargs):
595
+ return self._forward(**kwargs)
596
+
597
+ def _forward(
598
+ self,
599
+ # input_ids: torch.LongTensor, # removed since embed_tokens is outside the decoder
600
+ inputs_embeds: Optional[torch.FloatTensor] = None,
601
+ attention_mask: Optional[torch.FloatTensor] = None, # decoder_attention_mask
602
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
603
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
604
+ head_mask: Optional[torch.FloatTensor] = None,
605
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
606
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
607
+ use_cache: Optional[bool] = None,
608
+ output_attentions: Optional[bool] = None,
609
+ output_hidden_states: Optional[bool] = None,
610
+ return_dict: Optional[bool] = None,
611
+ ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPastAndCrossAttentions]:
612
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
613
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
614
+ """
615
+ Args:
616
+ inputs_embeds: torch.FloatTensor (B, K, T, D), where K is the number of channels
617
+ encoder_hidden_states: torch.FloatTensor (B, K, T, D), where K is the number of channels
618
+
619
+ Returns:
620
+ decoder_outputs: BaseModelOutputWithPastAndCrossAttentions
621
+ last_hidden_state: torch.FloatTensor (B, K, T, D), where K is the number of channels
622
+ past_key_values: Tuple[Tuple[torch.Tensor]]
623
+ hidden_states: Tuple[torch.FloatTensor]
624
+ attentions: Tuple[torch.FloatTensor]
625
+ cross_attentions: Tuple[torch.FloatTensor]
626
+
627
+ """
628
+ if isinstance(encoder_hidden_states, BaseModelOutput):
629
+ encoder_hidden_states = encoder_hidden_states.last_hidden_state
630
+
631
+ # Reshape input_embeds and encoder_hidden_states
632
+ b, k, t, d = inputs_embeds.size()
633
+ inputs_embeds = rearrange(inputs_embeds, 'b k t d -> (b k) t d')
634
+ encoder_hidden_states = rearrange(encoder_hidden_states, 'b k t d -> (b k) t d')
635
+
636
+ # K-channel Decoding
637
+ decoder_outputs = self.decoder(
638
+ inputs_embeds=inputs_embeds,
639
+ attention_mask=attention_mask,
640
+ past_key_values=past_key_values,
641
+ encoder_hidden_states=encoder_hidden_states,
642
+ encoder_attention_mask=encoder_attention_mask,
643
+ head_mask=head_mask,
644
+ cross_attn_head_mask=cross_attn_head_mask,
645
+ use_cache=use_cache,
646
+ output_attentions=output_attentions,
647
+ output_hidden_states=output_hidden_states,
648
+ return_dict=True,
649
+ )
650
+
651
+ # Reshape decoder_outputs
652
+ decoder_outputs['last_hidden_state'] = rearrange(decoder_outputs['last_hidden_state'],
653
+ '(b k) t d -> b k t d',
654
+ b=b,
655
+ k=k)
656
+
657
+ if not return_dict:
658
+ # Collecting values from decoder_outputs in a specific order
659
+ outputs = (
660
+ decoder_outputs['last_hidden_state'],
661
+ decoder_outputs.get('past_key_values', None),
662
+ decoder_outputs.get('hidden_states', None),
663
+ decoder_outputs.get('attentions', None),
664
+ decoder_outputs.get('cross_attentions', None),
665
+ )
666
+ return tuple(v for v in outputs if v is not None)
667
+ else:
668
+ return decoder_outputs # ['last_hidden_state']: (B, K, T, D)
669
+
670
+
671
+ def test_multi_channel_t5_decoder():
672
+ # Test multi-channel decoder
673
+ config = T5Config()
674
+ config.num_channels = 4
675
+ config.d_model = 32
676
+ config.num_layers = 2
677
+ config.num_heads = 2
678
+ config.num_max_positions = 64 # for positional encoding
679
+
680
+ decoder = MultiChannelT5Decoder(decoder_config=None, config=config)
681
+ decoder.eval()
682
+
683
+ input_emb = torch.rand(2, 4, 64, 32) # (B, K, T, D)
684
+ enc_hs = torch.rand(2, 4, 64, 32) # (B, K, T, D)
685
+ out = decoder(inputs_embeds=input_emb, encoder_hidden_states=enc_hs, return_dict=True)
686
+ # out['last_hidden_state']: (B, K, T, D)
687
+ # out['past_key_values']: Tuple[Tuple[torch.Tensor]]
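688
+
689
+ # Annotation (not in the original file): shape assertions that make the smoke test
690
+ # self-checking; assumes T5Config's default use_cache=True so a KV cache is returned.
691
+ assert out['last_hidden_state'].shape == (2, 4, 64, 32)  # (B, K, T, D) preserved
692
+ assert out['past_key_values'] is not None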
model/t5mod_helper.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """t5mod_helper.py"""
11
+ import torch
12
+ from torch import nn
13
+ from model.t5mod import T5DecoderYMT3, MultiChannelT5Decoder
14
+ from typing import Optional, Callable, Union, Literal
15
+
16
+
17
+ @torch.no_grad()
18
+ def task_cond_dec_generate(decoder: Union[T5DecoderYMT3, MultiChannelT5Decoder],
19
+ decoder_type: Literal["t5", "multi-t5"],
20
+ embed_tokens: nn.Embedding,
21
+ lm_head: nn.Module,
22
+ encoder_hidden_states: torch.FloatTensor,
23
+ shift_right_fn: Callable,
24
+ prefix_ids: Optional[torch.LongTensor] = None,
25
+ max_length: int = 1024,
26
+ stop_at_eos: bool = True,
27
+ eos_id: Optional[int] = 1,
28
+ pad_id: Optional[int] = 0,
29
+ decoder_start_token_id: Optional[int] = 0,
30
+ debug: bool = False) -> torch.LongTensor:
31
+ """
32
+ Generate a sequence with task conditioning on the decoder side.
33
+ An extension of the transformers.generate() function for models with
34
+ conditioning only on the decoder side.
35
+
36
+ Args:
37
+ decoder: T5DecoderYMT3 or MultiChannelT5Decoder, any decoder model with T5Stack architecture
38
+ decoder_type: Literal["t5", "multi-t5"], type of decoder
39
+ embed_tokens: nn.Embedding, embedding layer for the decoder
40
+ lm_head: nn.Module, language model head
41
+ encoder_hidden_states: torch.FloatTensor, (B, T, D) or (B, K, T, D) last hidden states
42
+ shift_right_fn: Callable, shift_right function of the decoder
43
+ prefix_ids: torch.LongTensor, (B, prefix_len) prefix ids typically used as task conditioning to decoder.
44
+ max_length: int, max token length to generate (default is 1024)
45
+ stop_at_eos: bool, whether to early-stop when all predictions in the batch are the <eos> token.
46
+ eos_id: int, the id of the <eos> token (default is 1)
47
+ pad_id: int, the id of the <pad> token (default is 0)
48
+ decoder_start_token_id: int, the id of the <bos> token (default is 0)
49
+ debug: bool, whether to print debug information
50
+
51
+ Returns:
52
+ pred_ids: torch.LongTensor, (B, task_len + N) or (B, C, task_len + N) predicted token ids
53
+ """
54
+ bsz = encoder_hidden_states.shape[0]
55
+ device = encoder_hidden_states.device
56
+
57
+ # Prepare dec_input_shape: (B, 1) or (B, C, 1)
58
+ if decoder_type == "t5":
59
+ dec_input_shape = (bsz, 1)
60
+ elif decoder_type == "multi-t5":
61
+ dec_input_shape = (bsz, decoder.num_channels, 1)
62
+ else:
63
+ raise ValueError(f"decoder_type {decoder_type} is not supported.")
64
+
65
+ # Prepare dec_input_ids: <bos> + task_prefix_token (B, prefix_len + 1) or (B, C, prefix_len + 1)
66
+ if prefix_ids is not None and prefix_ids.numel() > 0:
67
+ dec_input_ids = shift_right_fn(prefix_ids)
68
+ prefix_length = prefix_ids.shape[-1]
69
+ else:
70
+ # if prefix_ids is None, use <bos> as initial input
71
+ dec_input_ids = torch.tile(torch.LongTensor([decoder_start_token_id]).to(device), dec_input_shape)
72
+ prefix_length = 0
73
+ dec_inputs_embeds = embed_tokens(dec_input_ids) # (B, L, D) or (B, C, L, D)
74
+
75
+ # Generate decoder hidden state and past_key_values using prefix:
76
+ """
77
+ - initial inputs_embeds can be a sequence, without using past_key_values
78
+ - dec_hs: (B, L, D) where L is the initial input length
79
+ - past_key_values: Tuple of length M for M layers of decoder
80
+ - pred_ids: (B, prefix_len) predicted from the prefix pass
81
+ """
82
+ dec_hs, past_key_values = decoder(inputs_embeds=dec_inputs_embeds,
83
+ encoder_hidden_states=encoder_hidden_states,
84
+ return_dict=False)
85
+ logits = lm_head(dec_hs) # (B, L, vocab_size) or (B, C, L, vocab_size)
86
+ pred_ids = logits.argmax(-1) # (B, L) or (B, C, L)
87
+
88
+ # keep track of which sequences are already finished
89
+ unfinished_sequences = torch.ones(dec_input_shape, dtype=torch.long, device=device)
90
+
91
+ # Fast generation with past_key_values for the rest of the sequence
92
+ if decoder_type == "t5":
93
+ dec_input_ids = pred_ids[:, -1].unsqueeze(-1) # (B, 1)
94
+ elif decoder_type == "multi-t5":
95
+ dec_input_ids = pred_ids[:, :, -1].unsqueeze(-1) # (B, C, 1)
96
+ for i in range(max_length - prefix_length - 1): # -1 for <eos> token
97
+ if debug:
98
+ past_key_values_length = past_key_values[0][0].shape[2]
99
+ # past_key_values_length determines the positional embedding offset
100
+ print(f'i = {i}, past_key_values_length = {past_key_values_length}, pred_ids.shape = {pred_ids.shape}')
101
+
102
+ # when past_key_values is provided, we use only the last token as input_ids
103
+ dec_inputs_embeds = embed_tokens(dec_input_ids) # (B, 1, D) or (B, C, 1, D)
104
+ dec_hs, _past_key_values = decoder(inputs_embeds=dec_inputs_embeds,
105
+ encoder_hidden_states=encoder_hidden_states,
106
+ past_key_values=past_key_values,
107
+ return_dict=False)
108
+ logits = lm_head(dec_hs) # (B, 1, vocab_size) or (B, C, 1, vocab_size)
109
+ _pred_ids = logits.argmax(-1) # (B, 1) or (B, C, 1)
110
+
111
+ # update input_ids and past_key_values for next iteration
112
+ dec_input_ids = _pred_ids.clone() # (B, 1) or (B, C, 1)
113
+ # clone, because _pred_ids is modified below for finished sequences
114
+ past_key_values = _past_key_values
115
+
116
+ # finished sentences should have their next token be a padding token
117
+ if eos_id is not None:
118
+ if pad_id is None:
119
+ raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
120
+ _pred_ids = _pred_ids * unfinished_sequences + pad_id * (1 - unfinished_sequences)
121
+
122
+ # update pred_ids
123
+ pred_ids = torch.cat((pred_ids, _pred_ids), dim=-1) # (B, T') or (B, C, T') with increasing T'
124
+
125
+ # update state of unfinished_sequences
126
+ if eos_id is not None:
127
+ unfinished_sequences = unfinished_sequences * _pred_ids.ne(eos_id).long()
128
+
129
+ # early-stop when each sentence is finished
130
+ if stop_at_eos is True and unfinished_sequences.max() == 0:
131
+ break
132
+
133
+ return pred_ids # (B, L) or (B, C, L)
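134
+
135
+
136
+ # Annotation (not in the original file): a hypothetical call sketch. The attribute
137
+ # names on `model` are illustrative stand-ins for the components this function
138
+ # expects, not a confirmed YourMT3 API.
139
+ # >>> pred_ids = task_cond_dec_generate(
140
+ # ...     decoder=model.decoder, decoder_type="t5",
141
+ # ...     embed_tokens=model.embed_tokens, lm_head=model.lm_head,
142
+ # ...     encoder_hidden_states=enc_hs,  # (B, T, D) from the encoder
143
+ # ...     shift_right_fn=model.shift_right,
144
+ # ...     prefix_ids=task_prefix_ids, max_length=1024)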
pytest.ini ADDED
@@ -0,0 +1,2 @@
1
+ [pytest]
2
+ pythonpath = .
requirements.txt ADDED
@@ -0,0 +1,16 @@
1
+ mirdata
2
+ mido
3
+ matplotlib
4
+ lightning>=2.2.1
5
+ pytest-timeout
6
+ pytest
7
+ deprecated
8
+ librosa
9
+ einops
10
+ transformers==4.45
11
+ wandb
12
+ smart-open
13
+ git+https://github.com/craffel/mir_eval.git
14
+ git+https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup.git
15
+ torchaudio
16
+ huggingface_hub
test.py ADDED
@@ -0,0 +1,183 @@
1
+ # Copyright 2024 The YourMT3 Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Please see the details in the LICENSE file.
10
+ """ test.py """
11
+ import os
12
+ import pprint
13
+ import argparse
14
+ import torch
15
+
16
+ from utils.data_modules import AMTDataModule
17
+ from utils.task_manager import TaskManager
18
+ from model.init_train import initialize_trainer, update_config
19
+ from model.ymt3 import YourMT3
20
+ from config.data_presets import data_preset_single_cfg, data_preset_multi_cfg
21
+ from config.vocabulary import drum_vocab_presets
22
+ from utils.utils import str2bool
23
+
24
+ # yapf: disable
25
+ parser = argparse.ArgumentParser(description="YourMT3")
26
+ # General
27
+ parser.add_argument('exp_id', type=str, help='A unique identifier for the experiment, used to resume training. The "@" symbol can be used to load a specific checkpoint.')
28
+ parser.add_argument('-p', '--project', type=str, default='ymt3', help='project name')
29
+ parser.add_argument('-d', '--data-preset', type=str, default='musicnet_thickstun_ext_em', help='dataset preset (default=musicnet_thickstun_ext_em). See config/data.py for more options.')
30
+ # Audio configurations
31
+ parser.add_argument('-ac', '--audio-codec', type=str, default=None, help='audio codec (default=None). {"spec", "melspec"}. If None, default value defined in config.py will be used.')
32
+ parser.add_argument('-hop', '--hop-length', type=int, default=None, help='hop length in samples (default=None). {128, 300}: 128 for MT3, 300 for PerceiverTF. If None, default value defined in config.py will be used.')
33
+ parser.add_argument('-nmel', '--n-mels', type=int, default=None, help='number of mel bins (default=None). If None, default value defined in config.py will be used.')
34
+ parser.add_argument('-if', '--input-frames', type=int, default=None, help='number of audio frames for input segment (default=None). If None, default value defined in config.py will be used.')
35
+ # Model configurations
36
+ parser.add_argument('-sqr', '--sca-use-query-residual', type=str2bool, default=None, help='sca use query residual flag. Default follows config.py')
37
+ parser.add_argument('-enc', '--encoder-type', type=str, default=None, help="Encoder type. 't5' or 'perceiver-tf' or 'conformer'. Default is 't5', following config.py.")
38
+ parser.add_argument('-dec', '--decoder-type', type=str, default=None, help="Decoder type. 't5' or 'multi-t5'. Default is 't5', following config.py.")
39
+ parser.add_argument('-preenc', '--pre-encoder-type', type=str, default='default', help="Pre-encoder type. None or 'conv' or 'default'. By default, t5_enc:None, perceiver_tf_enc:conv, conformer:None")
40
+ parser.add_argument('-predec', '--pre-decoder-type', type=str, default='default', help="Pre-decoder type. {None, 'linear', 'conv1', 'mlp', 'group_linear'} or 'default'. Default is {'t5': None, 'perceiver-tf': 'linear', 'conformer': None}.")
41
+ parser.add_argument('-cout', '--conv-out-channels', type=int, default=None, help='Number of filters for pre-encoder conv layer. Default follows "model_cfg" of config.py.')
42
+ parser.add_argument('-tenc', '--task-cond-encoder', type=str2bool, default=True, help='task conditional encoder (default=True). True or False')
43
+ parser.add_argument('-tdec', '--task-cond-decoder', type=str2bool, default=True, help='task conditional decoder (default=True). True or False')
44
+ parser.add_argument('-df', '--d-feat', type=int, default=None, help='Audio feature will be projected to this dimension for Q,K,V of T5 or K,V of Perceiver (default=None). If None, default value defined in config.py will be used.')
45
+ parser.add_argument('-pt', '--pretrained', type=str2bool, default=False, help='pretrained T5(default=False). True or False')
46
+ parser.add_argument('-b', '--base-name', type=str, default="google/t5-v1_1-small", help='base model name (default="google/t5-v1_1-small")')
47
+ parser.add_argument('-epe', '--encoder-position-encoding-type', type=str, default='default', help="Positional encoding type of encoder. By default, pre-defined PE for T5 or Perceiver-TF encoder in config.py. For T5: {'sinusoidal', 'trainable'}, conformer: {'rotary', 'trainable'}, Perceiver-TF: {'trainable', 'rope', 'alibi', 'alibit', 'None', '0', 'none', 'tkd', 'td', 'tk', 'kdt'}.")
48
+ parser.add_argument('-dpe', '--decoder-position-encoding-type', type=str, default='default', help="Positional encoding type of decoder. By default, pre-defined PE for T5 in config.py. {'sinusoidal', 'trainable'}.")
49
+ parser.add_argument('-twe', '--tie-word-embedding', type=str2bool, default=None, help='tie word embedding (default=None). If None, default value defined in config.py will be used.')
50
+ parser.add_argument('-el', '--event-length', type=int, default=None, help='event length (default=None). If None, default value defined in model cfg of config.py will be used.')
51
+ # Perceiver-TF configurations
52
+ parser.add_argument('-dl', '--d-latent', type=int, default=None, help='Latent dimension of Perceiver. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
53
+ parser.add_argument('-nl', '--num-latents', type=int, default=None, help='Number of latents of Perceiver. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
54
+ parser.add_argument('-dpm', '--perceiver-tf-d-model', type=int, default=None, help='Perceiver-TF d_model (default=None). If None, default value defined in config.py will be used.')
55
+ parser.add_argument('-npb', '--num-perceiver-tf-blocks', type=int, default=None, help='Number of blocks of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py.')
56
+ parser.add_argument('-npl', '--num-perceiver-tf-local-transformers-per-block', type=int, default=None, help='Number of local layers per block of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
57
+ parser.add_argument('-npt', '--num-perceiver-tf-temporal-transformers-per-block', type=int, default=None, help='Number of temporal layers per block of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
58
+ parser.add_argument('-atc', '--attention-to-channel', type=str2bool, default=None, help='Attention to channel flag of Perceiver-TF. On T5, this will be ignored (default=None). If None, default value defined in config.py will be used.')
59
+ parser.add_argument('-ln', '--layer-norm-type', type=str, default=None, help='Layer normalization type (default=None). {"layer_norm", "rms_norm"}. If None, default value defined in config.py will be used.')
60
+ parser.add_argument('-ff', '--ff-layer-type', type=str, default=None, help='Feed forward layer type (default=None). {"mlp", "moe", "gmlp"}. If None, default value defined in config.py will be used.')
61
+ parser.add_argument('-wf', '--ff-widening-factor', type=int, default=None, help='Feed forward layer widening factor for MLP/MoE/gMLP (default=None). If None, default value defined in config.py will be used.')
62
+ parser.add_argument('-nmoe', '--moe-num-experts', type=int, default=None, help='Number of experts for MoE (default=None). If None, default value defined in config.py will be used.')
63
+ parser.add_argument('-kmoe', '--moe-topk', type=int, default=None, help='Top-k for MoE (default=None). If None, default value defined in config.py will be used.')
64
+ parser.add_argument('-act', '--hidden-act', type=str, default=None, help='Hidden activation function (default=None). {"gelu", "silu", "relu", "tanh"}. If None, default value defined in config.py will be used.')
65
+ parser.add_argument('-rt', '--rotary-type', type=str, default=None, help='Rotary embedding type expressed in three letters. e.g. ppl: "pixel" for SCA and latents, "lang" for temporal transformer. If None, use config.')
66
+ parser.add_argument('-rk', '--rope-apply-to-keys', type=str2bool, default=None, help='Apply rope to keys (default=None). If None, use config.')
67
+ parser.add_argument('-rp', '--rope-partial-pe', type=str2bool, default=None, help='Whether to apply RoPE to only half of the head dimensions (default=None). If None, use config.')
68
+ # Decoder configurations
69
+ parser.add_argument('-dff', '--decoder-ff-layer-type', type=str, default=None, help='Feed forward layer type of decoder (default=None). {"mlp", "moe", "gmlp"}. If None, default value defined in config.py will be used.')
70
+ parser.add_argument('-dwf', '--decoder-ff-widening-factor', type=int, default=None, help='Feed forward layer widening factor for decoder MLP/MoE/gMLP (default=None). If None, default value defined in config.py will be used.')
71
+ # Task and Evaluation configurations
72
+ parser.add_argument('-tk', '--task', type=str, default='mt3_full_plus', help='tokenizer type (default=mt3_full_plus). See config/task.py for more options.')
73
+ parser.add_argument('-epv', '--eval-program-vocab', type=str, default=None, help='evaluation vocabulary (default=None). If None, default vocabulary of the data preset will be used.')
74
+ parser.add_argument('-edv', '--eval-drum-vocab', type=str, default=None, help='evaluation vocabulary for drum (default=None). If None, default vocabulary of the data preset will be used.')
75
+ parser.add_argument('-etk', '--eval-subtask-key', type=str, default='default', help='evaluation subtask key (default=default). See config/task.py for more options.')
76
+ parser.add_argument('-t', '--onset-tolerance', type=float, default=0.05, help='onset tolerance (default=0.05).')
77
+ parser.add_argument('-os', '--test-octave-shift', type=str2bool, default=False, help='test optimal octave shift (default=False). True or False')
78
+ parser.add_argument('-w', '--write-model-output', type=str2bool, default=False, help='write model test output to file (default=False). True or False')
79
+ # Trainer configurations
80
+ parser.add_argument('-pr','--precision', type=str, default="bf16-mixed", help='precision (default="bf16-mixed") {32, 16, bf16, bf16-mixed}')
81
+ parser.add_argument('-st', '--strategy', type=str, default='auto', help='strategy (default=auto). auto or deepspeed or ddp')
82
+ parser.add_argument('-n', '--num-nodes', type=int, default=1, help='number of nodes (default=1)')
83
+ parser.add_argument('-g', '--num-gpus', type=str, default='auto', help='number of gpus (default="auto")')
84
+ parser.add_argument('-wb', '--wandb-mode', type=str, default=None, help='wandb mode for logging (default=None). "disabled" or "online" or "offline". If None, default value defined in config.py will be used.')
85
+ # Debug
86
+ parser.add_argument('-debug', '--debug-mode', type=str2bool, default=False, help='debug mode (default=False). True or False')
87
+ parser.add_argument('-tps', '--test-pitch-shift', type=int, default=None, help='use pitch shift when testing. debug-purpose only. (default=None). semitone in int.')
88
+ args = parser.parse_args()
89
+ # yapf: enable
90
+ if tuple(int(v) for v in torch.__version__.split(".")[:2]) >= (1, 13): # numeric version compare, not lexicographic string compare
91
+ torch.set_float32_matmul_precision("high")
92
+ args.epochs = None
93
+
94
+ # Initialize trainer
95
+ trainer, wandb_logger, dir_info, shared_cfg = initialize_trainer(args, stage='test')
96
+
97
+ # Update config with args, including augmentation settings
98
+ shared_cfg, audio_cfg, model_cfg = update_config(args, shared_cfg, stage='test')
99
+
100
+
101
+ def main():
102
+ # Data preset
103
+ if args.data_preset in data_preset_single_cfg:
104
+ # convert single preset into multi preset format
105
+ data_preset = {
106
+ "presets": [args.data_preset],
107
+ "eval_vocab": data_preset_single_cfg[args.data_preset]["eval_vocab"],
108
+ }
109
+ for k in data_preset_single_cfg[args.data_preset].keys():
110
+ if k in ["eval_drum_vocab", "add_pitch_class_metric"]:
111
+ data_preset[k] = data_preset_single_cfg[args.data_preset][k]
112
+ elif args.data_preset in data_preset_multi_cfg:
113
+ data_preset = data_preset_multi_cfg[args.data_preset]
114
+ else:
115
+ raise ValueError("Invalid data preset")
116
+ eval_drum_vocab = data_preset.get("eval_drum_vocab", None)
117
+
118
+ if args.eval_drum_vocab is not None: # override eval_drum_vocab
119
+ eval_drum_vocab = drum_vocab_presets[args.eval_drum_vocab]
120
+
121
+ # Task manager
122
+ tm = TaskManager(task_name=args.task,
123
+ max_shift_steps=int(shared_cfg["TOKENIZER"]["max_shift_steps"]),
124
+ debug_mode=args.debug_mode)
125
+ print(f"Task: {tm.task_name}, Max Shift Steps: {tm.max_shift_steps}")
126
+
127
+ results = []
128
+ for i, preset in enumerate(data_preset["presets"]):
129
+ # sdp: unpacking multi preset as a list of single presets
130
+ sdp = {
131
+ "presets": [preset],
132
+ "eval_vocab": [data_preset["eval_vocab"][i]],
133
+ "eval_drum_vocab": eval_drum_vocab,
134
+ }
135
+ for k in data_preset.keys():
136
+ if k not in ["presets", "eval_vocab"]:
137
+ sdp[k] = data_preset[k]
138
+
139
+ dm = AMTDataModule(data_preset_multi=sdp, task_manager=tm, audio_cfg=audio_cfg)
140
+
141
+ model = YourMT3(
142
+ audio_cfg=audio_cfg,
143
+ model_cfg=model_cfg,
144
+ shared_cfg=shared_cfg,
145
+ optimizer=None,
146
+ task_manager=tm, # tokenizer is a member of task_manager
147
+ eval_subtask_key=args.eval_subtask_key,
148
+ eval_vocab=args.eval_program_vocab if args.eval_program_vocab is not None else sdp["eval_vocab"],
149
+ eval_drum_vocab=sdp["eval_drum_vocab"],
150
+ write_output_dir=dir_info["lightning_dir"] if args.write_model_output or args.test_octave_shift else None,
151
+ onset_tolerance=float(args.onset_tolerance),
152
+ add_pitch_class_metric=sdp.get("add_pitch_class_metric", None),
153
+ test_optimal_octave_shift=args.test_octave_shift,
154
+ test_pitch_shift_layer=args.test_pitch_shift)
155
+
156
+ # load checkpoint & drop pitchshift from state_dict
157
+ checkpoint = torch.load(dir_info["last_ckpt_path"])
158
+ state_dict = checkpoint['state_dict']
159
+ new_state_dict = {k: v for k, v in state_dict.items() if 'pitchshift' not in k}
160
+ model.load_state_dict(new_state_dict, strict=False)
161
+ # if args.test_pitch_shift is None:
162
+ # new_state_dict = {k: v for k, v in state_dict.items() if 'pitchshift' not in k}
163
+ # model.load_state_dict(new_state_dict, strict=False)
164
+ # else:
165
+ # model.load_state_dict(state_dict, strict=False)
166
+
167
+ results.append("-----------------------------------------------------------------")
168
+ results.append(sdp)
169
+ results.append(trainer.test(model, datamodule=dm))
170
+ # TODO: directly load checkpoint including hyperparmeters https://lightning.ai/docs/pytorch/1.6.2/common/hyperparameters.html
171
+
172
+ # save result
173
+ pp = pprint.PrettyPrinter(indent=4)
174
+ results_str = pp.pformat(results)
175
+ result_file = os.path.join(dir_info["lightning_dir"],
176
+ f"result_{args.task}_{args.eval_subtask_key}_{args.data_preset}.json")
177
+ with open(result_file, 'w') as f:
178
+ f.write(results_str)
179
+ print(f"Result is saved to {result_file}")
180
+
181
+
182
+ if __name__ == "__main__":
183
+ main()
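A note on the checkpoint-loading step above: dropping the `pitchshift` keys before `load_state_dict(..., strict=False)` is a general idiom for ignoring auxiliary, train-time-only modules at test time. A minimal sketch of the same idiom, with hypothetical names (`load_filtered_checkpoint`, `drop_substrings`):

    import torch

    def load_filtered_checkpoint(model, ckpt_path, drop_substrings=("pitchshift",)):
        # Keep only weights whose key contains none of the given substrings.
        state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
        filtered = {k: v for k, v in state_dict.items()
                    if not any(s in k for s in drop_substrings)}
        # strict=False tolerates the keys that were dropped above.
        model.load_state_dict(filtered, strict=False)
        return model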
tests/.DS_Store ADDED
Binary file (6.15 kB).
tests/assert_fns.py ADDED
@@ -0,0 +1,55 @@
+ """ assert_fns.py """
+ import numpy as np
+
+
+ def assert_notes_almost_equal(actual_notes, predicted_notes, delta=5e-3):
+     """
+     Asserts that the given lists of Note instances are equal up to a small
+     floating-point tolerance, similar to `assertAlmostEqual` of `unittest`.
+     Tolerance is 5e-3 by default, which is 5 ms for 100 ticks-per-second.
+     """
+     assert len(actual_notes) == len(predicted_notes)
+     for actual_note, predicted_note in zip(actual_notes, predicted_notes):
+         assert abs(actual_note.onset - predicted_note.onset) < delta
+         assert abs(actual_note.offset - predicted_note.offset) < delta
+         assert actual_note.pitch == predicted_note.pitch
+         if actual_note.is_drum is False and predicted_note.is_drum is False:
+             assert actual_note.program == predicted_note.program
+         assert actual_note.is_drum == predicted_note.is_drum
+         assert actual_note.velocity == predicted_note.velocity
+
+
+ def assert_note_events_almost_equal(actual_note_events,
+                                     predicted_note_events,
+                                     ignore_time=False,
+                                     ignore_activity=True,
+                                     delta=5.1e-3):
+     """
+     Asserts that the given lists of NoteEvent instances are equal up to a small
+     floating-point tolerance, similar to `assertAlmostEqual` of `unittest`.
+     Tolerance is 5.1e-3 by default, slightly above 5 ms for 100 ticks-per-second.
+
+     If `ignore_time` is True, then the time field is ignored. (useful for
+     comparing tie note events, default is False)
+
+     If `ignore_activity` is True, then the activity field is ignored (default
+     is True).
+     """
+     assert len(actual_note_events) == len(predicted_note_events)
+     for j, (actual_note_event,
+             predicted_note_event) in enumerate(zip(actual_note_events, predicted_note_events)):
+         if ignore_time is False:
+             assert abs(actual_note_event.time - predicted_note_event.time) <= delta
+         assert actual_note_event.is_drum == predicted_note_event.is_drum
+         if actual_note_event.is_drum is False and predicted_note_event.is_drum is False:
+             assert actual_note_event.program == predicted_note_event.program
+         assert actual_note_event.pitch == predicted_note_event.pitch
+         assert actual_note_event.velocity == predicted_note_event.velocity
+         if ignore_activity is False:
+             assert actual_note_event.activity == predicted_note_event.activity
+
+
+ def assert_track_metrics_score1(metrics) -> None:
+     for k, v in metrics.items():
+         if not np.isnan(v):  # `np.isnan(v) is False` would never hold for numpy bools
+             assert v == 1.0
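For illustration, a minimal use of `assert_notes_almost_equal` above; a sketch, assuming the `Note` dataclass from `utils.note_event_dataclasses` as constructed in the tests below:

    from utils.note_event_dataclasses import Note
    from assert_fns import assert_notes_almost_equal

    ref = [Note(is_drum=False, program=0, onset=0.0, offset=1.0, pitch=60, velocity=1)]
    est = [Note(is_drum=False, program=0, onset=0.002, offset=1.001, pitch=60, velocity=1)]
    assert_notes_almost_equal(ref, est, delta=5e-3)  # passes: timing differs by < 5 ms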
tests/audio_test.py ADDED
@@ -0,0 +1,144 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """audio_test.py"""
+ import unittest
+ import os
+ import numpy as np
+ import wave
+ import tempfile
+ from utils.audio import load_audio_file
+ from utils.audio import get_audio_file_info
+ from utils.audio import slice_padded_array
+ from utils.audio import slice_padded_array_for_subbatch
+ from utils.audio import write_wav_file
+
+
+ class TestLoadAudioFile(unittest.TestCase):
+
+     def create_temp_wav_file(self, duration: float, fs: int = 16000) -> str:
+         n_samples = int(duration * fs)
+         temp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
+         temp_filename = temp_file.name
+
+         data = np.random.randint(-2**15, 2**15, n_samples, dtype=np.int16)
+
+         with wave.open(temp_filename, 'wb') as f:
+             f.setnchannels(1)
+             f.setsampwidth(2)
+             f.setframerate(fs)
+             f.writeframes(data.tobytes())
+
+         return temp_filename
+
+     def test_load_audio_file(self):
+         duration = 3.0
+         fs = 16000
+         temp_filename = self.create_temp_wav_file(duration, fs)
+
+         # Test load entire file
+         audio_data = load_audio_file(temp_filename, dtype=np.int16)
+         file_fs, n_frames, n_channels = get_audio_file_info(temp_filename)
+
+         self.assertEqual(len(audio_data), n_frames)
+         self.assertEqual(file_fs, fs)
+         self.assertEqual(n_channels, 1)
+
+         # Test load specific segment
+         seg_start_sec = 1.0
+         seg_length_sec = 1.0
+         audio_data = load_audio_file(temp_filename, seg_start_sec, seg_length_sec, dtype=np.int16)
+
+         self.assertEqual(len(audio_data), int(seg_length_sec * fs))
+
+         # Test unsupported file extension
+         with self.assertRaises(NotImplementedError):
+             load_audio_file("unsupported.xyz")
+
+
+ class TestSliceArray(unittest.TestCase):
+
+     def setUp(self):
+         self.x = np.random.randint(0, 10, size=(1, 10000))
+
+     def test_without_padding(self):
+         sliced_x = slice_padded_array(self.x, slice_length=100, slice_hop=50, pad=False)
+         self.assertEqual(sliced_x.shape, (199, 100))
+
+     def test_with_padding(self):
+         sliced_x = slice_padded_array(self.x, slice_length=100, slice_hop=50, pad=True)  # (10000 - 100) % 50 == 0, so no pad frame is added
+         self.assertEqual(sliced_x.shape, (199, 100))
+
+     def test_content(self):
+         sliced_x = slice_padded_array(self.x, slice_length=100, slice_hop=50, pad=True)
+         for i in range(sliced_x.shape[0] - 1):
+             np.testing.assert_array_equal(sliced_x[i, :], self.x[:, i * 50:i * 50 + 100].flatten())
+         # Test the last slice separately to account for potential padding
+         last_slice = sliced_x[-1, :]
+         last_slice_no_padding = self.x[:, -100:].flatten()
+         np.testing.assert_array_equal(last_slice[:len(last_slice_no_padding)], last_slice_no_padding)
+
+
+ class TestSlicePadForSubbatch(unittest.TestCase):
+
+     def test_slice_padded_array_for_subbatch(self):
+         input_array = np.random.randn(6, 10)
+         slice_length = 4
+         slice_hop = 2
+         pad = True
+         sub_batch_size = 4
+
+         expected_output_shape = (4, 4)
+
+         # Call the slice_padded_array_for_subbatch function
+         result = slice_padded_array_for_subbatch(input_array, slice_length, slice_hop, pad, sub_batch_size)
+
+         # Check if the output shape is correct
+         self.assertEqual(result.shape, expected_output_shape)
+
+         # Check if the number of slices is divisible by sub_batch_size
+         self.assertEqual(result.shape[0] % sub_batch_size, 0)
+
+
+ class TestWriteWavFile(unittest.TestCase):
+
+     def test_write_wav_file_z(self):
+         # Generate some test audio data
+         samplerate = 16000
+         duration = 1  # 1 second
+         t = np.linspace(0, duration, int(samplerate * duration), endpoint=False)
+         x = np.sin(2 * np.pi * 440 * t)
+
+         # Write the test audio data to a WAV file
+         filename = "extras/test.wav"
+         write_wav_file(filename, x, samplerate)
+
+         # Read the written WAV file and check its contents
+         with wave.open(filename, "rb") as wav_file:
+             # Check the WAV file parameters
+             self.assertEqual(wav_file.getnchannels(), 1)
+             self.assertEqual(wav_file.getsampwidth(), 2)
+             self.assertEqual(wav_file.getframerate(), samplerate)
+             self.assertEqual(wav_file.getnframes(), len(x))
+
+             # Read the audio samples from the WAV file
+             data = wav_file.readframes(len(x))
+
+             # Convert the audio sample byte string to a NumPy array and normalize it to the range [-1, 1]
+             x_read = np.frombuffer(data, dtype=np.int16) / 32767.0
+
+             # Check that the audio samples read from the WAV file are equal to the original audio samples
+             np.testing.assert_allclose(x_read, x, atol=1e-4)
+
+         # Delete the written WAV file
+         os.remove(filename)
+
+
+ if __name__ == '__main__':
+     unittest.main()
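The expected `(199, 100)` shape in `TestSliceArray` follows from the standard framing count; a quick check, assuming `slice_padded_array` frames a `(1, n_samples)` array this way:

    n_samples, slice_length, slice_hop = 10000, 100, 50
    n_slices = (n_samples - slice_length) // slice_hop + 1
    assert n_slices == 199  # and (10000 - 100) % 50 == 0, so no padding is needed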
tests/event2note_test.py ADDED
@@ -0,0 +1,187 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """event2note_test.py:
+
+ This file contains tests for the following functions:
+ • event2note_event
+ • note_event2note
+
+ """
+ import unittest
+ import pytest
+ from numpy import random
+ from assert_fns import assert_notes_almost_equal
+ from utils.note_event_dataclasses import Event, Note, NoteEvent
+ from utils.event2note import event2note_event
+ from utils.event2note import note_event2note
+
+
+ # yapf: disable
+ class TestEvent2NoteEvent(unittest.TestCase):
+     def test_event2note_event(self):
+         events = [
+             Event('program', 33), Event('pitch', 60),
+             Event('program', 52), Event('pitch', 40),
+             Event('tie', 0),
+             Event('shift', 20), Event('velocity', 1), Event('drum', 36),
+             Event('shift', 150), Event('program', 33), Event('velocity', 0), Event('pitch', 60),
+             Event('shift', 160), Event('velocity', 1), Event('pitch', 62), Event('program', 100),
+             Event('pitch', 77),
+             Event('shift', 200), Event('velocity', 0), Event('pitch', 77),
+             Event('shift', 250), Event('velocity', 1), Event('drum', 38),
+             Event('shift', 300), Event('velocity', 0), Event('program', 33), Event('pitch', 62)
+         ]
+
+         note_events, tie_note_events, last_activity, err_cnt = event2note_event(events, start_time=0, sort=False, tps=100)
+         self.assertEqual(len(err_cnt), 0)
+         expected_note_events = [NoteEvent(True, 128, 0.2, 1, 36),
+                                 NoteEvent(False, 33, 1.5, 0, 60),
+                                 NoteEvent(False, 33, 1.6, 1, 62),
+                                 NoteEvent(False, 100, 1.6, 1, 77),
+                                 NoteEvent(False, 100, 2.0, 0, 77),
+                                 NoteEvent(True, 128, 2.5, 1, 38),
+                                 NoteEvent(False, 33, 3.0, 0, 62)]
+         expected_tie_note_events = [NoteEvent(False, 33, None, 1, 60),
+                                     NoteEvent(False, 52, None, 1, 40)]
+         expected_last_activity = [(52, 40)]
+         self.assertSequenceEqual(note_events, expected_note_events)
+         self.assertSequenceEqual(tie_note_events, expected_tie_note_events)
+         self.assertSequenceEqual(last_activity, expected_last_activity)
+
+
+ class TestEvent2NoteEventInvalidInputWarn(unittest.TestCase):
+     def test_event2note_event_with_invalid_shift_value(self):
+         events = [Event('tie', 0), Event('shift', 0), Event('shift', 1050)]  # shift: 0 <= value <= 1000
+         _, _, _, err_cnt = event2note_event(events, start_time=0, sort=True, tps=100)
+         self.assertEqual(err_cnt['Err/Shift out of range'], 2)
+
+     def test_event2note_event_with_invalid_pitch_event(self):
+         events = [Event('pitch', 60), Event('tie', 0)]  # pitch event must follow a program event
+         _, _, _, err_cnt = event2note_event(events, start_time=0, sort=True, tps=100)
+         self.assertEqual(err_cnt['Err/Missing prg in tie'], 1)
+
+     def test_event2note_event_with_invalid_tie_event(self):
+         events = [Event('shift', 10)]
+         _, _, _, err_cnt = event2note_event(events, start_time=0, sort=True, tps=100)
+         self.assertEqual(err_cnt['Err/Missing tie'], 1)
+
+
+ class TestEvent2NoteEventSpecialEvent(unittest.TestCase):
+     def test_note_event2note_special_events(self):
+         events = [Event('program', 33), Event('pitch', 60),
+                   Event('tie', 0),
+                   Event('shift', 10), Event('program', 33), Event('velocity', 0), Event('pitch', 60),
+                   Event('EOS', 0), Event('PAD', 0),  # <- will stop decoding at this point...
+                   Event('shift', 20), Event('velocity', 1), Event('pitch', 20),
+                   Event('shift', 30), Event('velocity', 1), Event('pitch', 20),]
+         note_events, tie_note_events, _, err_cnt = event2note_event(events, start_time=0)
+         print(note_events)
+         self.assertEqual(len(note_events), 1)
+         self.assertEqual(len(tie_note_events), 1)
+         self.assertEqual(len(err_cnt), 0)
+
+
+ class TestNoteEvent2Note(unittest.TestCase):
+
+     def test_note_event2note(self):
+         note_events = [NoteEvent(is_drum=False, program=33, time=0, velocity=1, pitch=60),
+                        NoteEvent(is_drum=False, program=33, time=1.5, velocity=0, pitch=60),
+                        NoteEvent(is_drum=False, program=33, time=1.6, velocity=1, pitch=62),
+                        NoteEvent(is_drum=False, program=33, time=3.0, velocity=0, pitch=62),
+                        NoteEvent(is_drum=False, program=100, time=1.6, velocity=1, pitch=77),
+                        NoteEvent(is_drum=False, program=100, time=2.0, velocity=0, pitch=77),
+                        NoteEvent(is_drum=True, program=128, time=0.2, velocity=1, pitch=36),
+                        NoteEvent(is_drum=True, program=128, time=2.5, velocity=1, pitch=38)]
+         notes, err_cnt = note_event2note(note_events, sort=True)
+
+         expected_notes = [
+             Note(is_drum=False, program=33, onset=0, offset=1.5, pitch=60, velocity=1),
+             Note(is_drum=True, program=128, onset=0.2, offset=0.21, pitch=36, velocity=1),
+             Note(is_drum=False, program=33, onset=1.6, offset=3.0, pitch=62, velocity=1),
+             Note(is_drum=False, program=100, onset=1.6, offset=2.0, pitch=77, velocity=1),
+             Note(is_drum=True, program=128, onset=2.5, offset=2.51, pitch=38, velocity=1)
+         ]
+         self.assertEqual(len(err_cnt), 0)
+         assert_notes_almost_equal(notes, expected_notes, delta=5e-3)
+
+     def test_note_event2note_simple_cases(self):
+         # Case 1: Basic test case with two notes
+         note_events = [
+             NoteEvent(is_drum=False, program=0, time=0.1, velocity=1, pitch=60),
+             NoteEvent(is_drum=False, program=0, time=0.5, velocity=0, pitch=60),
+             NoteEvent(is_drum=False, program=0, time=0.7, velocity=1, pitch=62),
+             NoteEvent(is_drum=False, program=0, time=1.5, velocity=0, pitch=62),
+         ]
+
+         expected_notes = [
+             Note(is_drum=False, program=0, onset=0.1, offset=0.5, pitch=60, velocity=1),
+             Note(is_drum=False, program=0, onset=0.7, offset=1.5, pitch=62, velocity=1),
+         ]
+         notes, err_cnt = note_event2note(note_events)
+         self.assertEqual(len(err_cnt), 0)
+         self.assertSequenceEqual(notes, expected_notes)
+
+         # Case 2: Test with drum notes
+         note_events = [
+             NoteEvent(is_drum=True, program=128, time=0.2, velocity=1, pitch=36),
+             NoteEvent(is_drum=True, program=128, time=0.3, velocity=1, pitch=38),
+             NoteEvent(is_drum=True, program=128, time=0.4, velocity=0, pitch=36),
+             NoteEvent(is_drum=True, program=128, time=0.5, velocity=0, pitch=38),
+         ]
+
+         expected_notes = [
+             Note(is_drum=True, program=128, onset=0.2, offset=0.21, pitch=36, velocity=1),
+             Note(is_drum=True, program=128, onset=0.3, offset=0.31, pitch=38, velocity=1),
+         ]
+         notes, err_cnt = note_event2note(note_events)
+         self.assertEqual(len(err_cnt), 0)
+         assert_notes_almost_equal(notes, expected_notes, delta=5.1e-3)
+
+     def test_note_event2note_multiple_overlapping_notes(self):
+         note_events = [
+             NoteEvent(is_drum=False, program=1, time=0.0, velocity=1, pitch=60),
+             NoteEvent(is_drum=False, program=1, time=0.5, velocity=0, pitch=60),
+             NoteEvent(is_drum=False, program=1, time=1.0, velocity=1, pitch=62),
+             NoteEvent(is_drum=False, program=1, time=1.5, velocity=0, pitch=62),
+             NoteEvent(is_drum=False, program=2, time=0.25, velocity=1, pitch=60),
+             NoteEvent(is_drum=False, program=2, time=0.75, velocity=0, pitch=60),
+             NoteEvent(is_drum=False, program=2, time=1.25, velocity=1, pitch=62),
+             NoteEvent(is_drum=False, program=2, time=1.75, velocity=0, pitch=62),
+             NoteEvent(is_drum=False, program=3, time=0.0, velocity=1, pitch=64),
+             NoteEvent(is_drum=False, program=3, time=1.0, velocity=0, pitch=64),
+             NoteEvent(is_drum=False, program=4, time=0.5, velocity=1, pitch=66),
+             NoteEvent(is_drum=False, program=4, time=1.5, velocity=0, pitch=66),
+             NoteEvent(is_drum=False, program=4, time=0.75, velocity=1, pitch=67),
+             NoteEvent(is_drum=False, program=4, time=1.75, velocity=0, pitch=67),
+             NoteEvent(is_drum=False, program=4, time=1.0, velocity=1, pitch=69),
+             NoteEvent(is_drum=False, program=4, time=2.0, velocity=0, pitch=69)
+         ]
+
+         expected_notes = [
+             Note(is_drum=False, program=1, onset=0.0, offset=0.5, pitch=60, velocity=1),
+             Note(is_drum=False, program=3, onset=0.0, offset=1.0, pitch=64, velocity=1),
+             Note(is_drum=False, program=2, onset=0.25, offset=0.75, pitch=60, velocity=1),
+             Note(is_drum=False, program=4, onset=0.5, offset=1.5, pitch=66, velocity=1),
+             Note(is_drum=False, program=4, onset=0.75, offset=1.75, pitch=67, velocity=1),
+             Note(is_drum=False, program=1, onset=1.0, offset=1.5, pitch=62, velocity=1),
+             Note(is_drum=False, program=4, onset=1.0, offset=2.0, pitch=69, velocity=1),
+             Note(is_drum=False, program=2, onset=1.25, offset=1.75, pitch=62, velocity=1)
+         ]
+
+         notes, err_cnt = note_event2note(note_events)
+         self.assertEqual(len(err_cnt), 0)
+         assert_notes_almost_equal(notes, expected_notes, delta=5e-3)
+
+ # yapf: enable
+ if __name__ == '__main__':
+     unittest.main()
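One convention worth noting in the expected notes above: drum note events carry no explicit offset, so decoded drum notes get a fixed nominal duration (offset = onset + 0.01 s in the expectations, e.g. 0.2 → 0.21). A sketch of that convention, with a hypothetical helper and constant name:

    from utils.note_event_dataclasses import Note

    DRUM_NOTE_DURATION = 0.01  # 10 ms nominal duration, as in the expected notes above

    def drum_note(onset: float, pitch: int) -> Note:
        # Drums have no offset event; assign a fixed nominal duration.
        return Note(is_drum=True, program=128, onset=onset,
                    offset=onset + DRUM_NOTE_DURATION, pitch=pitch, velocity=1)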
tests/event_codec_test.py ADDED
@@ -0,0 +1,158 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """event_codec_test.py:
+
+ This file contains tests for the following classes:
+ • Event
+ • EventRange
+ • FastCodec, equivalent to the MT3 authors' Codec
+
+ See tokenizer_test.py for the FastCodec performance benchmark.
+
+ """
+ import unittest
+ from utils.note_event_dataclasses import Event, EventRange
+ from utils.event_codec import FastCodec as Codec
+ # from utils.event_codec import Codec
+
+
+ class TestEvent(unittest.TestCase):
+
+     def test_Event(self):
+         e = Event(type='shift', value=0)
+         self.assertEqual(e.type, 'shift')
+         self.assertEqual(e.value, 0)
+
+
+ class TestEventRange(unittest.TestCase):
+
+     def test_EventRange(self):
+         er = EventRange('abc', min_value=0, max_value=500)
+         self.assertEqual(er.type, 'abc')
+         self.assertEqual(er.min_value, 0)
+         self.assertEqual(er.max_value, 500)
+
+
+ class TestEventCodec(unittest.TestCase):
+
+     def test_event_codec(self):
+         ec = Codec(
+             special_tokens=['asd'],
+             max_shift_steps=1001,
+             event_ranges=[
+                 EventRange('pitch', min_value=0, max_value=127),
+                 EventRange('velocity', min_value=0, max_value=1),
+                 EventRange('tie', min_value=0, max_value=0),
+                 EventRange('program', min_value=0, max_value=127),
+                 EventRange('drum', min_value=0, max_value=127),
+             ],
+         )
+
+         events = [
+             Event(type='shift', value=0),  # actually not needed
+             Event(type='shift', value=1),  # 10 ms shift
+             Event(type='shift', value=1000),  # 10 s shift
+             Event(type='pitch', value=0),  # lowest pitch, 8.18 Hz
+             Event(type='pitch', value=60),  # C4 or 261.63 Hz
+             Event(type='pitch', value=127),  # highest pitch, G9 or 12543.85 Hz
+             Event(type='velocity', value=0),  # lowest velocity (note off)
+             Event(type='velocity', value=1),  # highest velocity (note on)
+             Event(type='tie', value=0),  # tie
+             Event(type='program', value=0),  # program
+             Event(type='program', value=127),  # program
+             Event(type='drum', value=0),  # drum
+             Event(type='drum', value=127),  # drum
+         ]
+
+         encoded = [ec.encode_event(e) for e in events]
+         decoded = [ec.decode_event_index(idx) for idx in encoded]
+         self.assertSequenceEqual(events, decoded)
+
+
+ class TestEventCodecErrorCases(unittest.TestCase):
+
+     def setUp(self):
+         self.event_ranges = [
+             EventRange("program", 0, 127),
+             EventRange("pitch", 0, 127),
+             EventRange("velocity", 0, 3),
+             EventRange("drum", 0, 127),
+             EventRange("tie", 0, 1),
+         ]
+         self.ec = Codec([], 1000, self.event_ranges)
+
+     def test_encode_event_with_invalid_event_type(self):
+         with self.assertRaises(ValueError):
+             self.ec.encode_event(Event("unknown_event_type", 50))
+
+     def test_encode_event_with_invalid_event_value(self):
+         with self.assertRaises(ValueError):
+             self.ec.encode_event(Event("program", 200))
+
+     def test_event_type_range_with_invalid_event_type(self):
+         with self.assertRaises(ValueError):
+             self.ec.event_type_range("unknown_event_type")
+
+     def test_decode_event_index_with_invalid_index(self):
+         with self.assertRaises(ValueError):
+             self.ec.decode_event_index(1000000)
+
+
+ class TestEventCodecVocabulary(unittest.TestCase):
+
+     def test_encode_event_using_program_vocabulary(self):
+         prog_vocab = {"Piano": [0, 1, 2, 3, 4, 5, 6, 7], "xxx": [50, 30, 120]}
+         ec = Codec(special_tokens=['asd'],
+                    max_shift_steps=1001,
+                    event_ranges=[
+                        EventRange('pitch', min_value=0, max_value=127),
+                        EventRange('velocity', min_value=0, max_value=1),
+                        EventRange('tie', min_value=0, max_value=0),
+                        EventRange('program', min_value=0, max_value=127),
+                        EventRange('drum', min_value=0, max_value=127),
+                    ],
+                    program_vocabulary=prog_vocab)
+
+         events = [
+             Event(type='program', value=0),  # 0 --> 0
+             Event(type='program', value=7),  # 7 --> 0
+             Event(type='program', value=111),  # 111 --> 111
+             Event(type='program', value=30),  # 30 --> 50
+         ]
+         encoded = [ec.encode_event(e) for e in events]
+         expected = [1133, 1133, 1244, 1183]
+         self.assertSequenceEqual(encoded, expected)
+
+     def test_encode_event_using_drum_vocabulary(self):
+         drum_vocab = {"Kick": [50, 51, 52], "Snare": [53, 54]}
+         ec = Codec(special_tokens=['asd'],
+                    max_shift_steps=1001,
+                    event_ranges=[
+                        EventRange('pitch', min_value=0, max_value=127),
+                        EventRange('velocity', min_value=0, max_value=1),
+                        EventRange('tie', min_value=0, max_value=0),
+                        EventRange('program', min_value=0, max_value=127),
+                        EventRange('drum', min_value=0, max_value=127),
+                    ],
+                    drum_vocabulary=drum_vocab)
+
+         events = [
+             Event(type='drum', value=50),
+             Event(type='drum', value=51),
+             Event(type='drum', value=53),
+             Event(type='drum', value=54),
+         ]
+         encoded = [ec.encode_event(e) for e in events]
+         self.assertEqual(encoded[0], encoded[1])
+         self.assertEqual(encoded[2], encoded[3])
+
+
+ if __name__ == '__main__':
+     unittest.main()
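The expected indices `[1133, 1133, 1244, 1183]` in `TestEventCodecVocabulary` follow from the codec's vocabulary layout, assuming special tokens come first, then shift steps, then each event range in declaration order:

    n_special, n_shift = 1, 1001           # ['asd'], shift values 0..1000
    n_pitch, n_velocity, n_tie = 128, 2, 1
    program_offset = n_special + n_shift + n_pitch + n_velocity + n_tie  # = 1133
    assert program_offset + 0 == 1133    # program 0 (and 7, remapped to 0 by "Piano")
    assert program_offset + 111 == 1244  # program 111, not in the vocabulary, unchanged
    assert program_offset + 50 == 1183   # program 30, remapped to 50 by "xxx"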
tests/metrics_test.py ADDED
@@ -0,0 +1,118 @@
+ # Copyright 2024 The YourMT3 Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Please see the details in the LICENSE file.
+ """metrics_test.py:
+
+ This file contains tests for the following classes:
+ • AMTMetrics
+
+ """
+ import unittest
+ import warnings
+ import torch
+ import numpy as np
+ from utils.metrics import AMTMetrics
+ from utils.metrics import compute_track_metrics
+
+
+ class TestAMTMetrics(unittest.TestCase):
+
+     def test_individual_attributes(self):
+         metric = AMTMetrics()
+
+         # Test updating the metric using .update() method
+         metric.onset_f.update(0.5)
+
+         # Test updating the metric using __call__() method
+         metric.onset_f(0.5)
+
+         # Test updating the metric with a weight
+         metric.onset_f(0, weight=1.0)
+
+         # Test computing the average value of the metric
+         computed_value = metric.onset_f.compute()
+         self.assertAlmostEqual(computed_value, 0.3333333333333333)
+
+         # Test resetting the metric
+         metric.onset_f.reset()
+         with self.assertWarns(UserWarning):
+             torch._assert(metric.onset_f.compute(), torch.nan)
+
+         # Test bulk_compute
+         with self.assertWarns(UserWarning):
+             computed_metrics = metric.bulk_compute()
+
+     def test_bulk_update_and_compute(self):
+         metric = AMTMetrics()
+
+         # Test bulk_update with values only
+         d1 = {'onset_f': 0.5, 'offset_f': 0.5}
+         metric.bulk_update(d1)
+
+         # Test bulk_update with values and weights
+         d2 = {'onset_f': {'value': 0.5, 'weight': 1.0}, 'offset_f': {'value': 0.5, 'weight': 1.0}}
+         metric.bulk_update(d2)
+
+         # Test bulk_compute
+         computed_metrics = metric.bulk_compute()
+
+         # Ensure the 'onset_f' and 'offset_f' keys exist in the computed_metrics dictionary
+         self.assertIn('onset_f', computed_metrics)
+         self.assertIn('offset_f', computed_metrics)
+
+         # Check the computed values
+         self.assertAlmostEqual(computed_metrics['onset_f'], 0.5)
+         self.assertAlmostEqual(computed_metrics['offset_f'], 0.5)
+
+     def test_compute_track_metrics_singing(self):
+         from config.vocabulary import SINGING_SOLO_CLASS, GM_INSTR_CLASS_PLUS
+         from utils.event2note import note_event2note
+
+         ref_notes_dict = np.load('extras/examples/singing_notes.npy', allow_pickle=True).tolist()
+         ref_note_events_dict = np.load('extras/examples/singing_note_events.npy', allow_pickle=True).tolist()
+         est_notes, _ = note_event2note(ref_note_events_dict['note_events'])
+         ref_notes = ref_notes_dict['notes']
+
+         metric = AMTMetrics(prefix='test/', extra_classes=[k for k in SINGING_SOLO_CLASS.keys()])
+         drum_metric, non_drum_metric, instr_metric = compute_track_metrics(est_notes,
+                                                                            ref_notes,
+                                                                            eval_vocab=SINGING_SOLO_CLASS,
+                                                                            eval_drum_vocab=None,
+                                                                            onset_tolerance=0.05)
+         metric.bulk_update(drum_metric)
+         metric.bulk_update(non_drum_metric)
+         metric.bulk_update(instr_metric)
+         computed_metrics = metric.bulk_compute()
+         cnt = 0
+         for k, v in computed_metrics.items():
+             if 'Singing Voice' in k:
+                 self.assertEqual(v, 1.0)
+                 cnt += 1
+         self.assertEqual(cnt, 6)
+
+         metric = AMTMetrics(prefix='test/', extra_classes=[k for k in GM_INSTR_CLASS_PLUS.keys()])
+         drum_metric, non_drum_metric, instr_metric = compute_track_metrics(est_notes,
+                                                                            ref_notes,
+                                                                            eval_vocab=GM_INSTR_CLASS_PLUS,
+                                                                            eval_drum_vocab=None,
+                                                                            onset_tolerance=0.05)
+         metric.bulk_update(drum_metric)
+         metric.bulk_update(non_drum_metric)
+         metric.bulk_update(instr_metric)
+         computed_metrics = metric.bulk_compute()
+         cnt = 0
+         for k, v in computed_metrics.items():
+             if 'Singing Voice' in k:
+                 self.assertEqual(v, 1.0)
+                 cnt += 1
+         self.assertEqual(cnt, 6)
+
+
+ if __name__ == '__main__':
+     unittest.main()
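The expected value `0.3333...` in `test_individual_attributes` is the weighted mean of the three updates (0.5, 0.5, and 0), assuming a default weight of 1.0 per update:

    values_and_weights = [(0.5, 1.0), (0.5, 1.0), (0.0, 1.0)]
    mean = (sum(v * w for v, w in values_and_weights)
            / sum(w for _, w in values_and_weights))
    assert abs(mean - 1 / 3) < 1e-12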
tests/midi_test.py ADDED
@@ -0,0 +1,65 @@
+ import unittest
+ from typing import List
+ from tempfile import NamedTemporaryFile
+ from assert_fns import assert_notes_almost_equal
+ from utils.note_event_dataclasses import Note
+
+ from utils.midi import note_event2midi
+ from utils.midi import midi2note
+ from utils.note2event import note2note_event
+ # yapf: disable
+
+
+ class TestNoteMidiConversion(unittest.TestCase):
+
+     def test_note2midi2note_z(self):
+         original_notes = [
+             Note(is_drum=False, program=3, onset=0., offset=1., pitch=60, velocity=1),
+             Note(is_drum=False, program=3, onset=1., offset=2., pitch=64, velocity=1),
+         ]
+
+         with NamedTemporaryFile(suffix=".mid", delete=True) as temp_file:
+             # Convert original_notes to MIDI and save it to the temporary file
+             note_events = note2note_event(notes=original_notes, sort=True)
+             note_event2midi(note_events, temp_file.name, velocity=100)
+
+             # Convert the MIDI back to notes
+             converted_notes, _ = midi2note(temp_file.name)
+
+             # Compare original notes and converted notes
+             assert_notes_almost_equal(original_notes, converted_notes)
+
+     def test_midi2note2midi2note_piano_z(self):
+         file = 'extras/examples/piano.mid'
+         # This MIDI file is missing the program change event, so we force it to be 0.
+         notes = midi2note(file, quantize=False, force_all_program_to=0)[0][:1000]  # first 1000 notes
+         note_events = note2note_event(notes=notes, sort=True)
+         note_event2midi(note_events, 'extras/examples/piano_converted.mid', velocity=100)
+         reconverted_notes, _ = midi2note('extras/examples/piano_converted.mid', quantize=False)
+         assert_notes_almost_equal(notes, reconverted_notes, delta=0.01)
+
+     def test_midi2note2midi2note_force_drum_z(self):
+         file = 'extras/examples/drum.mid'
+         conv_file = 'extras/examples/drum_converted.mid'
+         # This MIDI file is missing the program change event, so we treat every note as a drum.
+         notes = midi2note(file, quantize=True, force_all_drum=True)[0][:100]  # first 100 notes
+         note_events = note2note_event(notes=notes, sort=True)
+         note_event2midi(note_events, conv_file, velocity=100, ticks_per_beat=960)
+         reconverted_notes, _ = midi2note(conv_file, quantize=True, force_all_drum=True)
+         assert_notes_almost_equal(notes, reconverted_notes, delta=0.005)
+
+         # In drums, this is very inaccurate. We should fix this in the future.
+         # Even for the first 100 notes, the timing is off by 170 ms.
+
+     def test_midi2note_ignore_pedal_true_z(self):
+         file = 'extras/examples/piano.mid'
+         notes, _ = midi2note(file, quantize=False, ignore_pedal=True, force_all_program_to=0)
+         note_events = note2note_event(notes=notes, sort=True)
+         note_event2midi(note_events, 'extras/examples/piano_converted.mid', velocity=100)
+         reconverted_notes, _ = midi2note('extras/examples/piano_converted.mid', quantize=False)
+         assert_notes_almost_equal(notes, reconverted_notes, delta=0.01)
+
+
+ # yapf: enable
+
+ if __name__ == '__main__':
+     unittest.main()