forked from gwinndr/MusicTransformer-Pytorch
generate.py

import os
import random

import torch

from third_party.midi_processor.processor import decode_midi, encode_midi

from utilities.argument_funcs import parse_generate_args, print_generate_args
from model.music_transformer import MusicTransformer
from dataset.e_piano import create_epiano_datasets, process_midi
from utilities.constants import *
from utilities.device import get_device, use_cuda

# main
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Generates music from a model specified by command line arguments.
    ----------
    """

    args = parse_generate_args()
    print_generate_args(args)

    if(args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    os.makedirs(args.output_dir, exist_ok=True)

    # Grabbing dataset if needed
    _, _, dataset = create_epiano_datasets(args.midi_root, args.num_prime, random_seq=False)

    # Can be None, an integer index into the dataset, or a file path
    if(args.primer_file is None):
        f = str(random.randrange(len(dataset)))
    else:
        f = args.primer_file

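    # A digit string selects a primer sequence from the dataset by index;
    # anything else is treated as a path to a MIDI file to encode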
    if(f.isdigit()):
        idx = int(f)
        primer, _ = dataset[idx]
        primer = primer.to(get_device())

        print("Using primer index:", idx, "(", dataset.data_files[idx], ")")
    else:
        raw_mid = encode_midi(f)
        if(len(raw_mid) == 0):
            print("Error: No midi messages in primer file:", f)
            return

        primer, _ = process_midi(raw_mid, args.num_prime, random_seq=False)
        primer = torch.tensor(primer, dtype=TORCH_LABEL_TYPE, device=get_device())

        print("Using primer file:", f)

    model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                             d_model=args.d_model, dim_feedforward=args.dim_feedforward,
                             max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())

    model.load_state_dict(torch.load(args.model_weights, map_location=get_device()))

    # Save the primer first so the generated output can be compared against it
    f_path = os.path.join(args.output_dir, "primer.mid")
    decode_midi(primer[:args.num_prime].cpu().numpy(), file_path=f_path)

    # GENERATION
    model.eval()
    with torch.set_grad_enabled(False):
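        # beam > 0 runs beam search; beam == 0 samples from the model's output
        # distribution at each step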
        if(args.beam > 0):
            print("BEAM:", args.beam)
            beam_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=args.beam)

            f_path = os.path.join(args.output_dir, "beam.mid")
            decode_midi(beam_seq[0].cpu().numpy(), file_path=f_path)
        else:
            print("RAND DIST")
            rand_seq = model.generate(primer[:args.num_prime], args.target_seq_length, beam=0)

            f_path = os.path.join(args.output_dir, "rand.mid")
            decode_midi(rand_seq[0].cpu().numpy(), file_path=f_path)

if __name__ == "__main__":
    main()
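
# Example invocation (a sketch, not a documented guarantee: the paths and
# weights file below are hypothetical, and the flag names are assumed to match
# parse_generate_args() in utilities/argument_funcs.py of this repo):
#
#   python generate.py -output_dir ./output -model_weights ./results/best_acc_weights.pickle \
#       -primer_file ./primer.mid -num_prime 256 -target_seq_length 1024 --rpr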