-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdistribute_proposed_method.py
105 lines (77 loc) · 3.39 KB
/
distribute_proposed_method.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
import os
import pickle
import numpy as np
import soundfile as sf
from keras.models import load_model
from audio_preprocessing import get_log_mel_madmom
from audio_preprocessing import feature_reshape
from audio_preprocessing import VAD
import pyximport
pyximport.install(reload_support=True,
setup_args={'include_dirs': np.get_include()})
import viterbiDecodingPhonemeSeg
from general.parameters import hopsize_t
from general.parameters import varin
from general.utilFunctions import smooth_obs
from general.utilFunctions import parse_score
from general.utilFunctions import get_onset_time_syllable_duration_ref
from plot_code import figure_plot_joint
# ---------------------------------------------------------------------------
# Demo script: detect syllable onsets in a singing-exercise recording with a
# joint syllable/phoneme CNN onset detector, then decode syllable boundaries
# with duration-informed Viterbi decoding and plot them against the score.
# ---------------------------------------------------------------------------

# directory of this script (single-arg os.path.join was redundant)
root_path = os.path.dirname(__file__)
joint_cnn_model_path = os.path.join(root_path, 'cnnModels', 'joint')

# load keras joint cnn model
model_joint = load_model(os.path.join(joint_cnn_model_path, 'jan_joint0.h5'))

# load log mel feature scaler; 'with' closes the pickle file deterministically
# (the original passed an open() result straight to pickle.load, leaking the
# handle). NOTE(review): unpickling is only safe on trusted files — this
# scaler ships with the repo, so that holds here.
with open(os.path.join(joint_cnn_model_path, 'scaler_joint.pkl'), 'rb') as scaler_file:
    scaler_joint = pickle.load(scaler_file, encoding='latin')

# input audio, machine-readable score, and score image used in the final plot
wav_file = './data/reference_exercise_03_norm.wav'
score_file = './data/score_exercise_03.txt'
score_png = './data/exercise_03.png'

# parse the score into tempo, per-syllable durations, labels and beats
tempo, syllable_durations, syllable_labels, beats = parse_score(filename_score=score_file)
print('syllable durations (second):')
print(syllable_durations)
print('\n')
print('syllable labels:')
print(syllable_labels)
print('\n')
print(beats)

# get wav duration (seconds) from sample count / sample rate
data_wav, fs_wav = sf.read(wav_file)
time_wav = len(data_wav) / float(fs_wav)

# reference onset times / durations rescaled to the actual audio length
onset_time_ref, syllable_durations_ref = get_onset_time_syllable_duration_ref(
    syllable_durations=syllable_durations,
    len_audio=time_wav)
print(onset_time_ref)
print(syllable_durations_ref)

# voice activity detection result, used later in the plot
results_vad = VAD(wav_file=wav_file, hopsize_t=hopsize_t)

# calculate log mel feature, then scale / reshape / add a channel axis so it
# matches the CNN's expected input layout
log_mel_old = get_log_mel_madmom(wav_file, fs=fs_wav, hopsize_t=hopsize_t, channel=1)
log_mel = scaler_joint.transform(log_mel_old)
log_mel = feature_reshape(log_mel, nlen=7)
log_mel = np.expand_dims(log_mel, axis=1)

# get the onset detection functions (syllable- and phoneme-level)
obs_syllable, obs_phoneme = model_joint.predict(log_mel, batch_size=128, verbose=2)

# post-processing the detection functions: flatten to 1-D and smooth
obs_syllable = np.squeeze(obs_syllable)
obs_phoneme = np.squeeze(obs_phoneme)
obs_syllable = smooth_obs(obs_syllable)
obs_phoneme = smooth_obs(obs_phoneme)
# force maximal onset probability at the first and last frame — presumably to
# anchor the decoded boundaries at the start/end of the audio (TODO confirm)
obs_syllable[0] = 1.0
obs_syllable[-1] = 1.0

# normalize the syllable durations so they sum to the audio length
syllable_durations *= time_wav / np.sum(syllable_durations)

# decoding syllable boundaries (frame indices) with duration-informed Viterbi
boundaries_syllable = viterbiDecodingPhonemeSeg.viterbiSegmental2(obs_syllable, syllable_durations, varin)

# convert boundary frame indices to seconds; consecutive boundaries delimit
# one syllable each, so starts are [:-1] and ends are [1:]
boundaries_syllable_start_time = np.array(boundaries_syllable[:-1]) * hopsize_t
boundaries_syllable_end_time = np.array(boundaries_syllable[1:]) * hopsize_t
syllable_durations_detected = boundaries_syllable_end_time - boundaries_syllable_start_time

print('Detected syllable onset times (second):')
print(boundaries_syllable_start_time)
print('\n')
print(syllable_durations_detected)

# plot score image, features, VAD, detection function and detected boundaries
figure_plot_joint(score_png=score_png,
                  mfcc_line=log_mel_old,
                  onset_time_ref=onset_time_ref,
                  vad=results_vad,
                  obs_syllable=obs_syllable,
                  boundaries_syllable_start_time=boundaries_syllable_start_time,
                  labels_syllable=syllable_labels)