import json
import time
from itertools import islice

import laion_clap
import librosa
import numpy as np
import torch
from safetensors.numpy import save_file
def read_default_prompt():
    """Load the deduplicated audio-caption texts from disk."""
    with open('/root/autodl-tmp/dedup_audio_text_80.json', 'r') as f:
        return json.load(f)
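
# Hedged sanity check, not part of the original script: it assumes the JSON
# file stores a flat array of caption strings, which is the format
# model.get_text_embedding consumes below. `_check_prompt_format` is an
# illustrative helper name.
def _check_prompt_format(data):
    assert isinstance(data, list) and all(isinstance(t, str) for t in data), \
        "expected a JSON array of caption strings"
    return data
# Usage: text_data = _check_prompt_format(read_default_prompt())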
def init_audio_pipe():
    # Quantization helpers: round-tripping float32 audio through int16
    # reproduces the quantization CLAP's training audio went through.
    def int16_to_float32(x):
        return (x / 32767.0).astype(np.float32)

    def float32_to_int16(x):
        x = np.clip(x, a_min=-1., a_max=1.)
        return (x * 32767.).astype(np.int16)
    model = laion_clap.CLAP_Module(enable_fusion=False)
    model.load_ckpt()  # download the default pretrained checkpoint

    # Get audio embeddings from raw audio data.
    audio_data, _ = librosa.load('/root/autodl-tmp/下载.wav', sr=48000)  # CLAP expects 48 kHz audio ("下载" means "download")
    audio_data = audio_data.reshape(1, -1)  # make it (1, T); (N, T) also works
    audio_data = torch.from_numpy(
        int16_to_float32(float32_to_int16(audio_data))).float()  # quantize before sending it to the model
    audio_embed = model.get_audio_embedding_from_data(x=audio_data, use_tensor=True)
    print(audio_embed)
    print(audio_embed.shape)
    # Get text embeddings: iterate over text_data in batches and collect the
    # per-batch results (use_tensor=False returns numpy arrays, not tensors).
    start_time = time.time()
    text_data = read_default_prompt()
    batch_size = 256
    num_batches = int(np.ceil(len(text_data) / batch_size))
    text_embed = []
    for i in range(num_batches):
        # Get the next batch of text data.
        batch_data = list(islice(text_data, i * batch_size, (i + 1) * batch_size))
        # Embed the batch of text data.
        batch_embed = model.get_text_embedding(batch_data, use_tensor=False)
        # Append the batch embeddings to the list.
        text_embed.append(batch_embed)
    # Concatenate the embeddings from all batches into a single (N, D) array.
    text_embed = np.concatenate(text_embed)
    print(text_embed)
    print(text_embed.shape)
    # Save the embeddings to a file.
    tensors = {
        "text_embed": text_embed,
    }
    save_file(tensors, "/root/autodl-tmp/audio_text_embeddings.safetensors")
    print(f"embedded {len(text_data)} texts in {time.time() - start_time:.1f}s")
    # Audio-text similarity (kept from the original, commented out). Note that
    # text_embed is a numpy array at this point, so these torch calls would
    # first need torch.from_numpy(text_embed); see the sketch below.
    # result_tensor = torch.matmul(audio_embed, text_embed.transpose(0, 1))
    # similarity_scores = torch.softmax(result_tensor, dim=1)
    # print(similarity_scores)
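
# Hedged sketch, not part of the original script: reload the saved text
# embeddings and rank captions against an audio clip, i.e. a working version
# of the commented-out similarity block above. `rank_captions` and `top_k`
# are illustrative names; the file path matches the save_file call above.
def rank_captions(audio_embed, path="/root/autodl-tmp/audio_text_embeddings.safetensors", top_k=5):
    from safetensors.numpy import load_file
    text_embed = load_file(path)["text_embed"]  # (N, D) numpy array
    audio = audio_embed.detach().cpu().numpy()  # (1, D) torch -> numpy
    # Normalise both sides so the dot product is a cosine similarity,
    # whether or not CLAP already returns unit-norm embeddings.
    audio = audio / np.linalg.norm(audio, axis=-1, keepdims=True)
    text = text_embed / np.linalg.norm(text_embed, axis=-1, keepdims=True)
    scores = (audio @ text.T).squeeze(0)  # (N,) one similarity per caption
    order = np.argsort(scores)[::-1][:top_k]
    return order, scores[order]
# Usage, after computing audio_embed: idx, vals = rank_captions(audio_embed)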
if __name__ == "__main__":
    init_audio_pipe()