
Commit 5f03f0c

will run on windows (#75)
Hiroshiba authored Aug 24, 2019
1 parent caccab4 commit 5f03f0c
Showing 11 changed files with 193 additions and 183 deletions.
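The change is the standard Windows multiprocessing fix: Windows has no fork(), so multiprocessing starts workers by spawning fresh interpreters that re-import the main module. Any module-level side effect — here the pprint(vars(arguments)) call in each script, and in train.py the entire training setup — would therefore run again in every worker. The commit moves the print into main() in the scripts and wraps train.py's body in an if __name__ == '__main__': guard. A minimal sketch of the pattern (the work function and pool size are illustrative, not from this repository):

import multiprocessing

# Module-level side effects run again in every spawned worker on Windows,
# because each worker re-imports this module; they belong inside main().


def work(x):  # illustrative worker, not from the repository
    return x * x


def main():
    print('runs exactly once, in the parent process')
    with multiprocessing.Pool(2) as pool:
        print(pool.map(work, range(4)))


if __name__ == '__main__':
    # Without this guard, each spawned worker would re-execute the pool
    # setup on import and multiprocessing would raise a RuntimeError.
    main()

The argparse setup can stay at module level, as these scripts leave it, because spawned workers inherit the parent's sys.argv and re-parse it successfully on import; only the side-effecting print has to move.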
3 changes: 2 additions & 1 deletion scripts/check_silence.py
@@ -28,7 +28,6 @@
 parser.add_argument('--speaker_yml', type=Path)
 parser.add_argument('--pad_second', type=float, default=base_acoustic_param.pad_second)
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 # read parameters from speaker yml
 sconf = SpeakerYML(arguments.speaker_yml)
@@ -78,6 +77,8 @@ def calc_score(path: Path):
 
 
 def main():
+    pprint(vars(arguments))
+
     paths = [Path(p) for p in sorted(glob.glob(arguments.input_wave_glob))]
     pool = multiprocessing.Pool()
     it = pool.imap(calc_score, paths)
3 changes: 2 additions & 1 deletion scripts/convert_acoustic_feature.py
@@ -23,7 +23,6 @@
 parser.add_argument('--ignore_feature', nargs='+', default=['sp', 'ap'])
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 
 def convert_feature(path: Path, acoustic_converter: AcousticConverter):
@@ -40,6 +39,8 @@ def convert_feature(path: Path, acoustic_converter: AcousticConverter):
 
 
 def main():
+    pprint(vars(arguments))
+
     arguments.output.mkdir(exist_ok=True)
     save_arguments(arguments, arguments.output / 'arguments.json')
 
3 changes: 2 additions & 1 deletion scripts/extract_acoustic_feature.py
@@ -34,7 +34,6 @@
 parser.add_argument('--ignore_feature', nargs='+', default=['sp', 'ap'])
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 
 def generate_feature(path: Path):
@@ -85,6 +84,8 @@ def generate_feature(path: Path):
 
 
 def main():
+    pprint(vars(arguments))
+
     arguments.output.mkdir(exist_ok=True)
     save_arguments(arguments, arguments.output / 'arguments.json')
 
3 changes: 2 additions & 1 deletion scripts/extract_align_indexes.py
@@ -23,7 +23,6 @@
 parser.add_argument('--ignore_feature', nargs='+', default=('feature1', 'feature2'))
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 
 def generate_align_indexes(pair_path: Tuple[Path, Path]):
@@ -45,6 +44,8 @@ def generate_align_indexes(pair_path: Tuple[Path, Path]):
 
 
 def main():
+    pprint(vars(arguments))
+
     arguments.output.mkdir(exist_ok=True)
     save_arguments(arguments, arguments.output / 'arguments.json')
 
3 changes: 2 additions & 1 deletion scripts/extract_converted_spectrogram_pair.py
@@ -23,7 +23,6 @@
 parser.add_argument('--output', '-o', type=Path)
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 input_glob: Path = arguments.input_glob
 target_glob: Path = arguments.target_glob
@@ -53,6 +52,8 @@ def generate_file(pair_path: Tuple[Path, Path, Path]):
 
 
 def main():
+    pprint(vars(arguments))
+
     output.mkdir(exist_ok=True)
     save_arguments(arguments, output / 'arguments.json')
 
3 changes: 2 additions & 1 deletion scripts/extract_f0_statistics.py
@@ -13,7 +13,6 @@
 parser.add_argument('--input_glob', '-i')
 parser.add_argument('--output', '-o', type=Path)
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 
 def load_f0(path: Path):
@@ -22,6 +21,8 @@ def load_f0(path: Path):
 
 
 def main():
+    pprint(vars(arguments))
+
     paths = [Path(p) for p in sorted(glob.glob(arguments.input_glob))]
 
     pool = multiprocessing.Pool()
3 changes: 2 additions & 1 deletion scripts/extract_gmm_align_indexes.py
@@ -43,7 +43,6 @@
 parser.add_argument('--ignore_feature', nargs='+', default=('feature1', 'feature2'))
 parser.add_argument('--enable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 # read parameters from speaker yml
 sconf1 = SpeakerYML(arguments.org_yml)
@@ -130,6 +129,8 @@ def generate_align_indexes(pair_path: Tuple[Path, Path]):
 
 
 def main():
+    pprint(vars(arguments))
+
     arguments.output.mkdir(exist_ok=True)
     save_arguments(arguments, arguments.output / 'arguments.json')
 
3 changes: 2 additions & 1 deletion scripts/generate_aligned_wave.py
@@ -31,7 +31,6 @@
 parser.add_argument('--alpha', type=float, default=base_acoustic_param.alpha)
 parser.add_argument('--disable_overwrite', action='store_true')
 arguments = parser.parse_args()
-pprint(vars(arguments))
 
 
 def generate_aligned_wave(
@@ -72,6 +71,8 @@ def generate_aligned_wave(
 
 
 def main():
+    pprint(vars(arguments))
+
     arguments.output.mkdir(exist_ok=True)
     save_arguments(arguments, arguments.output / 'arguments.json')
 
167 changes: 84 additions & 83 deletions train.py
@@ -20,86 +20,87 @@
 from yukarin.model import create
 from yukarin.updater import Updater
 
-parser = argparse.ArgumentParser()
-parser.add_argument('config_json_path', type=Path)
-parser.add_argument('output', type=Path)
-arguments = parser.parse_args()
-
-config = create_from_json(arguments.config_json_path)
-arguments.output.mkdir(exist_ok=True)
-config.save_as_json((arguments.output / 'config.json').absolute())
-
-# model
-if config.train.gpu >= 0:
-    cuda.get_device_from_id(config.train.gpu).use()
-predictor, discriminator = create(config.model)
-models = {
-    'predictor': predictor,
-    'discriminator': discriminator,
-}
-
-if config.train.pretrained_model is not None:
-    serializers.load_npz(str(config.train.pretrained_model), predictor)
-
-# dataset
-dataset = create_dataset(config.dataset)
-train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
-test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
-train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
-
-
-# optimizer
-def create_optimizer(model):
-    cp: Dict[str, Any] = copy(config.train.optimizer)
-    n = cp.pop('name').lower()
-
-    if n == 'adam':
-        optimizer = optimizers.Adam(**cp)
-    elif n == 'sgd':
-        optimizer = optimizers.SGD(**cp)
-    else:
-        raise ValueError(n)
-
-    optimizer.setup(model)
-    return optimizer
-
-
-opts = {key: create_optimizer(model) for key, model in models.items()}
-
-# updater
-converter = partial(convert.concat_examples, padding=0)
-updater = Updater(
-    loss_config=config.loss,
-    predictor=predictor,
-    discriminator=discriminator,
-    device=config.train.gpu,
-    iterator=train_iter,
-    optimizer=opts,
-    converter=converter,
-)
-
-# trainer
-trigger_log = (config.train.log_iteration, 'iteration')
-trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
-trigger_stop = (config.train.stop_iteration, 'iteration') if config.train.stop_iteration is not None else None
-
-trainer = training.Trainer(updater, stop_trigger=trigger_stop, out=arguments.output)
-tb_writer = SummaryWriter(Path(arguments.output))
-
-ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='test', trigger=trigger_log)
-ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
-trainer.extend(ext, name='train', trigger=trigger_log)
-
-trainer.extend(extensions.dump_graph('predictor/loss'))
-
-ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
-trainer.extend(ext, trigger=trigger_snapshot)
-
-trainer.extend(extensions.LogReport(trigger=trigger_log))
-trainer.extend(TensorBoardReport(writer=tb_writer), trigger=trigger_log)
-
-if trigger_stop is not None:
-    trainer.extend(extensions.ProgressBar(trigger_stop))
-
-trainer.run()
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('config_json_path', type=Path)
+    parser.add_argument('output', type=Path)
+    arguments = parser.parse_args()
+
+    config = create_from_json(arguments.config_json_path)
+    arguments.output.mkdir(exist_ok=True)
+    config.save_as_json((arguments.output / 'config.json').absolute())
+
+    # model
+    if config.train.gpu >= 0:
+        cuda.get_device_from_id(config.train.gpu).use()
+    predictor, discriminator = create(config.model)
+    models = {
+        'predictor': predictor,
+        'discriminator': discriminator,
+    }
+
+    if config.train.pretrained_model is not None:
+        serializers.load_npz(str(config.train.pretrained_model), predictor)
+
+    # dataset
+    dataset = create_dataset(config.dataset)
+    train_iter = MultiprocessIterator(dataset['train'], config.train.batchsize)
+    test_iter = MultiprocessIterator(dataset['test'], config.train.batchsize, repeat=False, shuffle=False)
+    train_eval_iter = MultiprocessIterator(dataset['train_eval'], config.train.batchsize, repeat=False, shuffle=False)
+
+
+    # optimizer
+    def create_optimizer(model):
+        cp: Dict[str, Any] = copy(config.train.optimizer)
+        n = cp.pop('name').lower()
+
+        if n == 'adam':
+            optimizer = optimizers.Adam(**cp)
+        elif n == 'sgd':
+            optimizer = optimizers.SGD(**cp)
+        else:
+            raise ValueError(n)
+
+        optimizer.setup(model)
+        return optimizer
+
+
+    opts = {key: create_optimizer(model) for key, model in models.items()}
+
+    # updater
+    converter = partial(convert.concat_examples, padding=0)
+    updater = Updater(
+        loss_config=config.loss,
+        predictor=predictor,
+        discriminator=discriminator,
+        device=config.train.gpu,
+        iterator=train_iter,
+        optimizer=opts,
+        converter=converter,
+    )
+
+    # trainer
+    trigger_log = (config.train.log_iteration, 'iteration')
+    trigger_snapshot = (config.train.snapshot_iteration, 'iteration')
+    trigger_stop = (config.train.stop_iteration, 'iteration') if config.train.stop_iteration is not None else None
+
+    trainer = training.Trainer(updater, stop_trigger=trigger_stop, out=arguments.output)
+    tb_writer = SummaryWriter(Path(arguments.output))
+
+    ext = extensions.Evaluator(test_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='test', trigger=trigger_log)
+    ext = extensions.Evaluator(train_eval_iter, models, converter, device=config.train.gpu, eval_func=updater.forward)
+    trainer.extend(ext, name='train', trigger=trigger_log)
+
+    trainer.extend(extensions.dump_graph('predictor/loss'))
+
+    ext = extensions.snapshot_object(predictor, filename='predictor_{.updater.iteration}.npz')
+    trainer.extend(ext, trigger=trigger_snapshot)
+
+    trainer.extend(extensions.LogReport(trigger=trigger_log))
+    trainer.extend(TensorBoardReport(writer=tb_writer), trigger=trigger_log)
+
+    if trigger_stop is not None:
+        trainer.extend(extensions.ProgressBar(trigger_stop))
+
+    trainer.run()
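The same failure mode can be reproduced on Linux or macOS, where fork is the default start method, by forcing spawn; a minimal test sketch (forcing the start method is a testing suggestion, not part of this commit):

import multiprocessing


def work(x):  # illustrative worker, not from the repository
    return x + 1


if __name__ == '__main__':
    # 'spawn' is the only start method on Windows; forcing it on POSIX
    # surfaces the same module re-import behavior this commit guards against.
    multiprocessing.set_start_method('spawn')
    with multiprocessing.Pool(2) as pool:
        print(pool.map(work, range(4)))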