lm_dpsgd_tutorial.py
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training a language model (recurrent neural network) with DP-SGD optimizer.
This tutorial uses a corpus of text from TensorFlow datasets unless a
FLAGS.data_dir is specified with the path to a directory containing two files
train.txt and test.txt corresponding to a training and test corpus.
Even though we haven't done any hyperparameter tuning, and the analytical
epsilon upper bound can't offer any strong guarantees, the benefits of training
with differential privacy can be clearly seen by examining the trained model.
In particular, such inspection can confirm that the set of training-data
examples that the model fails to learn (i.e., has high perplexity for) comprises
outliers and rare sentences outside the distribution to be learned (see examples
and a discussion in this blog post). This can be further confirmed by
testing the differentially-private model's propensity for memorization, e.g.,
using the exposure metric of https://arxiv.org/abs/1802.08232.
This example is decribed in more details in this post: https://goo.gl/UKr7vH
"""
import os

from absl import app
from absl import flags
from absl import logging

import dp_accounting
import numpy as np
import tensorflow as tf
from tensorflow import estimator as tf_estimator
from tensorflow.compat.v1 import estimator as tf_compat_v1_estimator
import tensorflow_datasets as tfds

from tensorflow_privacy.privacy.optimizers import dp_optimizer

flags.DEFINE_boolean(
    'dpsgd', True, 'If True, train with DP-SGD. If False, '
    'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 0.001,
                   'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 256, 'Batch size')
flags.DEFINE_integer('epochs', 60, 'Number of epochs')
flags.DEFINE_integer(
    'microbatches', 256, 'Number of microbatches '
    '(must evenly divide batch_size)')
flags.DEFINE_string('model_dir', None, 'Model directory')
flags.DEFINE_string('data_dir', None, 'Directory containing the PTB data.')

FLAGS = flags.FLAGS

SEQ_LEN = 80
NB_TRAIN = 45000
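
# Note added for clarity: with the default batch_size of 256 and
# NB_TRAIN = 45000, the Poisson sampling probability assumed by the privacy
# accountant in compute_epsilon below is 256 / 45000, roughly 0.0057 per step.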


def rnn_model_fn(features, labels, mode):  # pylint: disable=unused-argument
  """Model function for a RNN."""

  # Define RNN architecture using tf.keras.layers.
  x = features['x']
  x = tf.reshape(x, [-1, SEQ_LEN])
  input_layer = x[:, :-1]
  input_one_hot = tf.one_hot(input_layer, 256)
  lstm = tf.keras.layers.LSTM(256, return_sequences=True)(input_one_hot)
  logits = tf.keras.layers.Dense(256)(lstm)

  # Calculate loss as a vector (to support microbatches in DP-SGD).
  vector_loss = tf.nn.softmax_cross_entropy_with_logits(
      labels=tf.cast(tf.one_hot(x[:, 1:], 256), dtype=tf.float32),
      logits=logits)
  # Define mean of loss across minibatch (for reporting through tf.Estimator).
  scalar_loss = tf.reduce_mean(vector_loss)

  # Configure the training op (for TRAIN mode).
  if mode == tf_estimator.ModeKeys.TRAIN:
    if FLAGS.dpsgd:
      optimizer = dp_optimizer.DPAdamGaussianOptimizer(
          l2_norm_clip=FLAGS.l2_norm_clip,
          noise_multiplier=FLAGS.noise_multiplier,
          num_microbatches=FLAGS.microbatches,
          learning_rate=FLAGS.learning_rate,
          unroll_microbatches=True)
      opt_loss = vector_loss
    else:
      optimizer = tf.compat.v1.train.AdamOptimizer(
          learning_rate=FLAGS.learning_rate)
      opt_loss = scalar_loss
    global_step = tf.compat.v1.train.get_global_step()
    train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, train_op=train_op)

  # Add evaluation metrics (for EVAL mode).
  elif mode == tf_estimator.ModeKeys.EVAL:
    eval_metric_ops = {
        'accuracy':
            tf.compat.v1.metrics.accuracy(
                labels=tf.cast(x[:, 1:], dtype=tf.int32),
                predictions=tf.argmax(input=logits, axis=2))
    }
    return tf_estimator.EstimatorSpec(
        mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
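
# Conceptual note (added comment, not part of the original code): at each
# training step, DPAdamGaussianOptimizer consumes the per-example
# `vector_loss`, splits it into FLAGS.microbatches microbatches, computes one
# gradient per microbatch, clips each microbatch gradient to L2 norm
# FLAGS.l2_norm_clip, sums the clipped gradients, adds Gaussian noise with
# standard deviation FLAGS.noise_multiplier * FLAGS.l2_norm_clip, and averages
# before applying the Adam update. This is why the DP branch above passes
# `vector_loss` to the optimizer while the non-private branch uses
# `scalar_loss`.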


def load_data():
  """Load training and validation data."""
  if not FLAGS.data_dir:
    print('FLAGS.data_dir containing train.txt and test.txt was not specified, '
          'using a substitute dataset from the tensorflow_datasets module.')
    train_dataset = tfds.load(
        name='lm1b/subwords8k',
        split=tfds.Split.TRAIN,
        batch_size=NB_TRAIN,
        shuffle_files=True)
    test_dataset = tfds.load(
        name='lm1b/subwords8k', split=tfds.Split.TEST, batch_size=10000)
    train_data = next(iter(tfds.as_numpy(train_dataset)))
    test_data = next(iter(tfds.as_numpy(test_dataset)))
    train_data = train_data['text'].flatten()
    test_data = test_data['text'].flatten()
  else:
    train_fpath = os.path.join(FLAGS.data_dir, 'train.txt')
    test_fpath = os.path.join(FLAGS.data_dir, 'test.txt')
    train_txt = open(train_fpath).read().split()
    test_txt = open(test_fpath).read().split()
    keys = sorted(set(train_txt))
    remap = {k: i for i, k in enumerate(keys)}
    train_data = np.array([remap[x] for x in train_txt], dtype=np.uint8)
    test_data = np.array([remap[x] for x in test_txt], dtype=np.uint8)

  return train_data, test_data
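
# Note (added for clarity): both branches of load_data return flat 1-D integer
# arrays. main() below feeds batch_size * SEQ_LEN tokens per Estimator batch
# through numpy_input_fn, and rnn_model_fn reshapes each batch to
# [-1, SEQ_LEN], i.e. batch_size sequences of SEQ_LEN tokens.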


def compute_epsilon(steps):
  """Computes epsilon value for given hyperparameters."""
  if FLAGS.noise_multiplier == 0.0:
    return float('inf')
  orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
  sampling_probability = FLAGS.batch_size / NB_TRAIN
  accountant = dp_accounting.rdp.RdpAccountant(orders)
  event = dp_accounting.SelfComposedDpEvent(
      dp_accounting.PoissonSampledDpEvent(
          sampling_probability,
          dp_accounting.GaussianDpEvent(FLAGS.noise_multiplier)), steps)
  accountant.compose(event)
  # Delta is set to 1e-5 because Penn TreeBank has 60000 training points.
  return accountant.get_epsilon(target_delta=1e-5)
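
# Usage sketch (added note, illustrative only): self-composing the sampled
# Gaussian event `steps` times, as done above, is equivalent to composing the
# single-step event with a repetition count, e.g.:
#
#   single_step = dp_accounting.PoissonSampledDpEvent(
#       sampling_probability, dp_accounting.GaussianDpEvent(noise_multiplier))
#   accountant.compose(single_step, steps)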


def main(unused_argv):
  logging.set_verbosity(logging.INFO)
  if FLAGS.batch_size % FLAGS.microbatches != 0:
    raise ValueError('Number of microbatches should evenly divide batch_size')

  # Load training and test data.
  train_data, test_data = load_data()

  # Instantiate the tf.Estimator.
  conf = tf_estimator.RunConfig(save_summary_steps=1000)
  lm_classifier = tf_estimator.Estimator(
      model_fn=rnn_model_fn, model_dir=FLAGS.model_dir, config=conf)

  # Create tf.Estimator input functions for the training and test data.
  batch_len = FLAGS.batch_size * SEQ_LEN
  train_data_end = len(train_data) - len(train_data) % batch_len
  test_data_end = len(test_data) - len(test_data) % batch_len
  train_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': train_data[:train_data_end]},
      batch_size=batch_len,
      num_epochs=FLAGS.epochs,
      shuffle=False)
  eval_input_fn = tf_compat_v1_estimator.inputs.numpy_input_fn(
      x={'x': test_data[:test_data_end]},
      batch_size=batch_len,
      num_epochs=1,
      shuffle=False)

  # Training loop.
  steps_per_epoch = len(train_data) // batch_len
  for epoch in range(1, FLAGS.epochs + 1):
    print('epoch', epoch)
    # Train the model for one epoch.
    lm_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)

    if epoch % 5 == 0:
      name_input_fn = [('Train', train_input_fn), ('Eval', eval_input_fn)]
      for name, input_fn in name_input_fn:
        # Evaluate the model and print results.
        eval_results = lm_classifier.evaluate(input_fn=input_fn)
        result_tuple = (epoch, eval_results['accuracy'], eval_results['loss'])
        print(name, 'accuracy after %d epochs is: %.3f (%.4f)' % result_tuple)

    # Compute the privacy budget expended so far.
    if FLAGS.dpsgd:
      eps = compute_epsilon(epoch * steps_per_epoch)
      print('For delta=1e-5, the current epsilon is: %.2f' % eps)
    else:
      print('Trained with vanilla non-private SGD optimizer')


if __name__ == '__main__':
  app.run(main)