import torch
from tqdm import tqdm
import whisper

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model('large-v2', device=DEVICE)
model.eval()

data = torch.load('./train_chime4.pt')
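
# A minimal sketch of the path lookup that users must supply below. The
# directory layout and file naming here are hypothetical; adapt them to
# wherever your parallel clean/noisy CHiME-4 audio actually lives.
def get_audio_paths(utt_id):
    clean_audio_path = f'./chime4/clean/{utt_id}.wav'   # hypothetical layout
    noisy_audio_path = f'./chime4/noisy/{utt_id}.wav'   # hypothetical layout
    return clean_audio_path, noisy_audio_path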

data_with_speech = []
for item in tqdm(data):
    with torch.no_grad():
        ### TO FILL BY USERS:
        # use the utterance id (item['id']) to retrieve the parallel audio
        # paths; get_audio_paths above is a hypothetical stand-in for that lookup
        clean_audio_path, noisy_audio_path = get_audio_paths(item['id'])

        ### extract clean audio features
        clean_audio = whisper.load_audio(clean_audio_path)
        # NOTE: the stock Whisper encoder expects fixed 30 s inputs; uncomment
        # the next line unless your encoder accepts variable-length mels
        # clean_audio = whisper.pad_or_trim(clean_audio)    # padding to 30s
        clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
        clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]

        ### extract noisy audio features
        noisy_audio = whisper.load_audio(noisy_audio_path)
        # noisy_audio = whisper.pad_or_trim(noisy_audio)    # padding to 30s
        noisy_mel = whisper.log_mel_spectrogram(noisy_audio).to(model.device)
        noisy_audio_features = model.encoder(noisy_mel.unsqueeze(0))[0]

        # move features to CPU so the saved file can be loaded without a GPU
        item_with_speech = {**item,
                            'audio_features': noisy_audio_features.cpu(),
                            'clean_audio_features': clean_audio_features.cpu()}
        data_with_speech.append(item_with_speech)

torch.save(data_with_speech, './train_chime4_with_speech.pt')
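
# Optional sanity check: reload the dump and inspect one item's feature shapes.
# With pad_or_trim enabled, the large-v2 encoder yields a (1500, 1280) feature
# matrix per utterance; without padding, the stock encoder rejects shorter mels.
checked = torch.load('./train_chime4_with_speech.pt')
print(checked[0]['audio_features'].shape)
print(checked[0]['clean_audio_features'].shape)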