"""Build a tempo-estimation instruction dataset from the GiantSteps Tempo set.

Each source track is split into 30-second excerpts (a trailing clip shorter
than 5 seconds is discarded), every excerpt is exported as WAV under
``new_audio/``, and one instruction/answer sample per excerpt is written to a
JSONL file (one JSON object per line).
"""
import hashlib
import json
import os

import tqdm
from pydub import AudioSegment

EXCERPT_LENGTH = 30 * 1000  # 30 seconds in milliseconds
MIN_LENGTH = 5 * 1000  # 5 seconds in milliseconds
PATH = "/work/fast_data_yinghao/GS_Tempo"


def _make_uuid(sample, existing_ids):
    """Return a 16-hex-char uuid derived from the sample's content.

    The id is the first 16 chars of the MD5 of
    ``instruction#input#output``.  On a collision with an already-issued id,
    a 16-char SHA1 of the same string is appended and the pair re-hashed
    with MD5 so the final id stays 16 chars.  The issued id is recorded in
    *existing_ids*.
    """
    uuid_string = f"{sample['instruction']}#{sample['input']}#{sample['output']}"
    unique_id = hashlib.md5(uuid_string.encode()).hexdigest()[:16]  # keep first 16 chars
    if unique_id in existing_ids:
        # Truncate SHA1 to the same width so the concatenation is stable.
        sha1_hash = hashlib.sha1(uuid_string.encode()).hexdigest()[:16]
        # Combine the MD5 and SHA1 digests and re-hash for the final uuid.
        unique_id = hashlib.md5((unique_id + sha1_hash).encode()).hexdigest()[:16]
    existing_ids.add(unique_id)
    return unique_id


audio_dir = f"{PATH}/giantsteps-tempo-dataset/audio"
existed_uuid_list = set()
data_samples = []

for audio_name in tqdm.tqdm(os.listdir(audio_dir)):
    audio_path = os.path.join(audio_dir, audio_name)
    if not os.path.exists(audio_path):
        print(f"File not found: {audio_path}")
        continue

    # Ground-truth tempo (bpm string) from the v2 annotations; close the
    # handle promptly instead of leaking it.
    annotation_path = (
        f"{PATH}/giantsteps-tempo-dataset/annotations_v2/tempo/{audio_name[:-4]}.LOFI.bpm"
    )
    with open(annotation_path, "r") as annotation_file:
        output = annotation_file.readline().strip()

    # Distinct name: the original rebound the loop variable `audio` to the
    # AudioSegment, shadowing the filename string.
    segment = AudioSegment.from_file(audio_path)

    # Split into 30-second excerpts; pydub slicing clamps at the end, so the
    # final excerpt may be shorter than EXCERPT_LENGTH.
    for i in range(0, len(segment), EXCERPT_LENGTH):
        excerpt = segment[i : i + EXCERPT_LENGTH]
        # Discard a too-short trailing clip.
        if len(excerpt) < MIN_LENGTH:
            break

        excerpt_path = (
            f"{PATH}/new_audio/{os.path.basename(audio_path)[:-4]}_{i // EXCERPT_LENGTH}.wav"
        )
        excerpt.export(excerpt_path, format="wav")

        sample = {
            "instruction": "Please estimate the tempo of the music. Such as 60.0bpm, 143.2bpm etc.",
            "input": f"<|SOA|>{excerpt_path}<|EOA|>",
            "output": output,
            "uuid": "",
            "audioid": excerpt_path,
            "split": ["train"],
            # NOTE(review): "esitimation" typo kept byte-identical — downstream
            # consumers may match on this exact string.
            "task_type": {"major": ["global_MIR"], "minor": ["tempo_esitimation"]},
            "domain": "music",
            "source": "GS_Tempo",
            "other": {},
        }
        # Fix: the original computed a uuid only for a whole-file sample that
        # was never appended, leaving every emitted excerpt with uuid "".
        # Assign a content-derived uuid to each excerpt sample instead.
        sample["uuid"] = _make_uuid(sample, existed_uuid_list)
        data_samples.append(sample)

# Save in true JSONL format (one JSON object per line), matching the .jsonl
# extension — the original dumped the whole list as a single JSON array.
output_file_path = f"{PATH}/GS-Tempo_train.jsonl"
with open(output_file_path, "w") as outfile:
    for sample in data_samples:
        json.dump(sample, outfile)
        outfile.write("\n")