librispeech.py

import json
import pathlib

import numpy as np
import librosa
import soundfile

"""
The dataset has to be downloaded manually from https://www.openslr.org/12/ and put in `extra/datasets/librispeech`.
For mlperf validation the dev-clean dataset is used.

Then all the flacs have to be converted to wav using something like:
```fish
for file in $(find * | grep flac); do ffmpeg -i $file -ar 16k "$(dirname $file)/$(basename $file .flac).wav"; done
```

Then this [file](https://github.com/mlcommons/inference/blob/master/speech_recognition/rnnt/dev-clean-wav.json) also has to be put in `extra/datasets/librispeech`.
"""
BASEDIR = pathlib.Path(__file__).parent / "librispeech"
with open(BASEDIR / "dev-clean-wav.json") as f:
  ci = json.load(f)

# 80-band mel filter bank over a 512-point FFT (16 kHz audio, 0-8 kHz range),
# with a leading batch axis; 320-sample (20 ms) Hann analysis window
FILTER_BANK = np.expand_dims(librosa.filters.mel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000), 0)
WINDOW = librosa.filters.get_window("hann", 320)
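# A quick shape sanity check (not in the original file): the filter bank maps
# n_fft // 2 + 1 = 257 FFT bins onto 80 mel bands, with the leading axis there
# for broadcasting over a batch of spectrograms.
assert FILTER_BANK.shape == (1, 80, 257) and WINDOW.shape == (320,)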
def feature_extract(x, x_lens):
  # number of output frames: 160-sample (10 ms) hop, then every 3rd spliced frame is kept
  x_lens = np.ceil((x_lens / 160) / 3).astype(np.int32)

  # pre-emphasis
  x = np.concatenate((np.expand_dims(x[:, 0], 1), x[:, 1:] - 0.97 * x[:, :-1]), axis=1)

  # stft
  x = librosa.stft(x, n_fft=512, window=WINDOW, hop_length=160, win_length=320, center=True, pad_mode="reflect")
  x = np.stack((x.real, x.imag), axis=-1)

  # power spectrum
  x = (x**2).sum(-1)

  # mel filter bank
  x = np.matmul(FILTER_BANK, x)

  # log
  x = np.log(x + 1e-20)

  # feature splice: stack each frame with the next two (zero-padded at the end),
  # then keep every 3rd frame
  seq = [x]
  for i in range(1, 3):
    tmp = np.zeros_like(x)
    tmp[:, :, :-i] = x[:, :, i:]
    seq.append(tmp)
  features = np.concatenate(seq, axis=1)[:, :, ::3]

  # normalize: per-utterance mean/std computed over the valid (unpadded) frames only
  features_mean = np.zeros((features.shape[0], features.shape[1]), dtype=np.float32)
  features_std = np.zeros((features.shape[0], features.shape[1]), dtype=np.float32)
  for i in range(features.shape[0]):
    features_mean[i, :] = features[i, :, :x_lens[i]].mean(axis=1)
    features_std[i, :] = features[i, :, :x_lens[i]].std(axis=1, ddof=1)
  features_std += 1e-5
  features = (features - np.expand_dims(features_mean, 2)) / np.expand_dims(features_std, 2)

  # time-major output: (time, batch, 80 mels * 3 splices = 240)
  return features.transpose(2, 0, 1), x_lens.astype(np.float32)
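# A minimal shape sketch (not in the original file): two one-second 16 kHz
# waveforms produce ceil((16000 / 160) / 3) = 34 spliced frames of 240 features:
#
#   feats, lens = feature_extract(np.zeros((2, 16000), dtype=np.float32), np.array([16000, 16000]))
#   feats.shape -> (34, 2, 240), lens -> array([34., 34.], dtype=float32)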
def load_wav(file):
  # raw float32 waveform and its length in samples
  sample = soundfile.read(file)[0].astype(np.float32)
  return sample, sample.shape[0]
def iterate(bs=1, start=0):
  print(f"there are {len(ci)} samples in the dataset")
  for i in range(start, len(ci), bs):
    samples, sample_lens = zip(*[load_wav(BASEDIR / v["files"][0]["fname"]) for v in ci[i : i + bs]])
    samples = list(samples)

    # pad every sample in the batch to the length of the longest one
    max_len = max(sample_lens)
    for j in range(len(samples)):
      samples[j] = np.pad(samples[j], (0, max_len - sample_lens[j]), "constant")

    samples, sample_lens = np.array(samples), np.array(sample_lens)
    yield feature_extract(samples, sample_lens), np.array([v["transcript"] for v in ci[i : i + bs]])
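# Each yielded item is ((features, feature_lens), transcripts): features is
# (time, bs, 240) float32, feature_lens is (bs,) float32, and transcripts is a
# (bs,) array of strings (a descriptive note, not in the original file).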
if __name__ == "__main__":
  X, Y = next(iterate())
  print(X[0].shape, Y.shape)