Skip to content
This repository has been archived by the owner on Apr 1, 2024. It is now read-only.

Differential Privacy #2

Open
wants to merge 5 commits into
base: dp-patch
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions espresso/models/speech_lstm.py
Original file line number Diff line number Diff line change
@@ -203,7 +203,7 @@ def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_layers=args.encoder_rnn_layers,
dropout_in=args.encoder_rnn_dropout_in,
dropout_out=args.encoder_rnn_dropout_out,
bidirectional=args.encoder_rnn_bidirectional,
bidirectional=False,
residual=args.encoder_rnn_residual,
src_bucketed=(getattr(task.cfg, "num_batch_buckets", 0) > 0),
max_source_positions=max_source_positions,
@@ -916,7 +916,7 @@ def base_architecture(args):
)
args.encoder_rnn_hidden_size = getattr(args, "encoder_rnn_hidden_size", 320)
args.encoder_rnn_layers = getattr(args, "encoder_rnn_layers", 3)
args.encoder_rnn_bidirectional = getattr(args, "encoder_rnn_bidirectional", True)
args.encoder_rnn_bidirectional = getattr(args, "encoder_rnn_bidirectional", False)
args.encoder_rnn_residual = getattr(args, "encoder_rnn_residual", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 48)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
Expand Down
2 changes: 1 addition & 1 deletion espresso/models/speech_lstm_encoder_model.py
Original file line number Diff line number Diff line change
@@ -279,7 +279,7 @@ def base_architecture(args):
)
args.encoder_rnn_hidden_size = getattr(args, "encoder_rnn_hidden_size", 320)
args.encoder_rnn_layers = getattr(args, "encoder_rnn_layers", 3)
args.encoder_rnn_bidirectional = getattr(args, "encoder_rnn_bidirectional", True)
args.encoder_rnn_bidirectional = getattr(args, "encoder_rnn_bidirectional", False)
args.encoder_rnn_residual = getattr(args, "encoder_rnn_residual", False)
args.encoder_rnn_dropout_in = getattr(args, "encoder_rnn_dropout_in", args.dropout)
args.encoder_rnn_dropout_out = getattr(args, "encoder_rnn_dropout_out", args.dropout)
Expand Down
7 changes: 6 additions & 1 deletion espresso/speech_train.py
Original file line number Diff line number Diff line change
@@ -76,6 +76,11 @@ def main(cfg: DictConfig) -> None:

# Build model and criterion
model = task.build_model(cfg.model)

from opendp.network.odometer import PrivacyOdometer
odometer = PrivacyOdometer(step_epsilon=1.0)
model = odometer.make_tracked_view(model)

criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
@@ -129,7 +134,7 @@ def main(cfg: DictConfig) -> None:
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > cfg.optimization.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
while lr > 0.00000001 and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
Expand Down