Skip to content

Commit cbbc5b0

Browse files
update tests
1 parent 1b21fde commit cbbc5b0

File tree

11 files changed

+90
-82
lines changed

11 files changed

+90
-82
lines changed

adapt/feature_based/_cdan.py

+2-16
Original file line numberDiff line numberDiff line change
@@ -278,8 +278,9 @@ def train_step(self, data):
278278

279279
# Update weights
280280
self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
281-
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
282281
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
282+
if len(gradients_enc) > 0:
283+
self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
283284

284285
# Update metrics
285286
logs = self._update_logs(ys, ys_pred)
@@ -334,21 +335,6 @@ def _initialize_networks(self):
334335
self.discriminator_ = check_network(self.discriminator,
335336
copy=self.copy,
336337
name="discriminator")
337-
338-
339-
340-
# def _initialize_networks(self, shape_Xt):
341-
# Call predict to avoid strange behaviour with
342-
# Sequential model with unspecified input_shape
343-
# zeros_enc_ = self.encoder_.predict(np.zeros((1,) + shape_Xt));
344-
# zeros_task_ = self.task_.predict(zeros_enc_);
345-
# if zeros_task_.shape[1] * zeros_enc_.shape[1] > self.max_features:
346-
# self.discriminator_.predict(np.zeros((1, self.max_features)))
347-
# else:
348-
# zeros_mapping_ = np.matmul(np.expand_dims(zeros_enc_, 2),
349-
# np.expand_dims(zeros_task_, 1))
350-
# zeros_mapping_ = np.reshape(zeros_mapping_, (1, -1))
351-
# self.discriminator_.predict(zeros_mapping_);
352338

353339

354340
def predict_disc(self, X):

adapt/feature_based/_wdgrl.py

+1-4
Original file line numberDiff line numberDiff line change
@@ -183,10 +183,7 @@ def train_step(self, data):
183183
self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
184184

185185
# Update metrics
186-
self.compiled_metrics.update_state(ys, ys_pred)
187-
self.compiled_loss(ys, ys_pred)
188-
# Return a dict mapping metric names to current value
189-
logs = {m.name: m.result() for m in self.metrics}
186+
logs = self._update_logs(ys, ys_pred)
190187
disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
191188
logs.update(disc_metrics)
192189
logs.update({"gp": penalty})

adapt/parameter_based/_regular.py

+46-22
Original file line numberDiff line numberDiff line change
@@ -412,41 +412,65 @@ def _initialize_networks(self):
412412
else:
413413
self.task_ = check_network(self.task,
414414
copy=self.copy,
415+
force_copy=True,
415416
name="task")
417+
418+
419+
def _initialize_weights(self, shape_X):
420+
if hasattr(self, "task_"):
421+
self.task_.build((None,) + shape_X)
422+
self.build((None,) + shape_X)
416423
self._add_regularization()
417424

418425

419-
def _get_regularizer(self, old_weight, weight, lambda_=1.):
426+
def _get_regularizer(self, old_weight, weight, lambda_):
420427
if self.regularizer == "l2":
421-
def regularizer():
422-
return lambda_ * tf.reduce_mean(tf.square(old_weight - weight))
428+
return lambda_ * tf.reduce_mean(tf.square(old_weight - weight))
423429
if self.regularizer == "l1":
424-
def regularizer():
425-
return lambda_ * tf.reduce_mean(tf.abs(old_weight - weight))
430+
return lambda_ * tf.reduce_mean(tf.abs(old_weight - weight))
426431
return regularizer
427432

428433

434+
def train_step(self, data):
435+
# Unpack the data.
436+
Xs, Xt, ys, yt = self._unpack_data(data)
437+
438+
# Run forward pass.
439+
with tf.GradientTape() as tape:
440+
y_pred = self.task_(Xt, training=True)
441+
if hasattr(self, "_compile_loss") and self._compile_loss is not None:
442+
loss = self._compile_loss(yt, y_pred)
443+
else:
444+
loss = self.compiled_loss(yt, y_pred)
445+
446+
loss = tf.reduce_mean(loss)
447+
loss += sum(self.losses)
448+
reg_loss = 0.
449+
for i in range(len(self.task_.trainable_variables)):
450+
reg_loss += self._get_regularizer(self.old_weights_[i],
451+
self.task_.trainable_variables[i],
452+
self.lambdas_[i])
453+
loss += reg_loss
454+
455+
# Run backwards pass.
456+
gradients = tape.gradient(loss, self.task_.trainable_variables)
457+
self.optimizer.apply_gradients(zip(gradients, self.task_.trainable_variables))
458+
return self._update_logs(yt, y_pred)
459+
460+
429461
def _add_regularization(self):
430-
i = 0
462+
self.old_weights_ = []
431463
if not hasattr(self.lambdas, "__iter__"):
432-
lambdas = [self.lambdas]
464+
self.lambdas_ = [self.lambdas] * len(self.task_.weights)
433465
else:
434-
lambdas = self.lambdas
466+
self.lambdas_ = (self.lambdas +
467+
[self.lambdas[-1]] * (len(self.task_.weights) - len(self.lambdas)))
468+
self.lambdas_ = self.lambdas_[::-1]
435469

436-
for layer in reversed(self.task_.layers):
437-
if (hasattr(layer, "weights") and
438-
layer.weights is not None and
439-
len(layer.weights) != 0):
440-
if i >= len(lambdas):
441-
lambda_ = lambdas[-1]
442-
else:
443-
lambda_ = lambdas[i]
444-
for weight in reversed(layer.weights):
445-
old_weight = tf.identity(weight)
446-
old_weight.trainable = False
447-
self.add_loss(self._get_regularizer(
448-
old_weight, weight, lambda_))
449-
i += 1
470+
for weight in self.task_.trainable_variables:
471+
old_weight = tf.identity(weight)
472+
old_weight.trainable = False
473+
self.old_weights_.append(old_weight)
450474

451475

452476
def call(self, inputs):

tests/test_adda.py

+6-5
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import numpy as np
77
import tensorflow as tf
88
from tensorflow.keras import Sequential, Model
9-
from tensorflow.keras.layers import Dense
9+
from tensorflow.keras.layers import Dense, Input
1010
from tensorflow.keras.initializers import GlorotUniform
1111
from tensorflow.keras.optimizers import Adam
1212

@@ -26,7 +26,8 @@
2626

2727
def _get_encoder(input_shape=Xs.shape[1:]):
2828
model = Sequential()
29-
model.add(Dense(1, input_shape=input_shape,
29+
model.add(Input(shape=input_shape))
30+
model.add(Dense(1,
3031
kernel_initializer="ones",
3132
use_bias=False))
3233
model.compile(loss="mse", optimizer="adam")
@@ -35,8 +36,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
3536

3637
def _get_discriminator(input_shape=(1,)):
3738
model = Sequential()
39+
model.add(Input(shape=input_shape))
3840
model.add(Dense(10,
39-
input_shape=input_shape,
4041
kernel_initializer=GlorotUniform(seed=0),
4142
activation="elu"))
4243
model.add(Dense(1,
@@ -48,10 +49,10 @@ def _get_discriminator(input_shape=(1,)):
4849

4950
def _get_task(input_shape=(1,), output_shape=(1,)):
5051
model = Sequential()
52+
model.add(Input(shape=input_shape))
5153
model.add(Dense(np.prod(output_shape),
5254
use_bias=False,
53-
kernel_initializer=GlorotUniform(seed=0),
54-
input_shape=input_shape))
55+
kernel_initializer=GlorotUniform(seed=0)))
5556
model.compile(loss="mse", optimizer=Adam(0.1))
5657
return model
5758

tests/test_cdan.py

+5-4
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
import numpy as np
66
import tensorflow as tf
77
from tensorflow.keras import Sequential, Model
8-
from tensorflow.keras.layers import Dense
8+
from tensorflow.keras.layers import Dense, Input
99
from tensorflow.keras.optimizers import Adam
1010
from tensorflow.keras.initializers import GlorotUniform
1111

@@ -27,16 +27,17 @@ def _entropy(x):
2727

2828
def _get_encoder(input_shape=Xs.shape[1:], units=10):
2929
model = Sequential()
30-
model.add(Dense(units, input_shape=input_shape,
30+
model.add(Input(shape=input_shape))
31+
model.add(Dense(units,
3132
kernel_initializer=GlorotUniform(seed=0),))
3233
model.compile(loss="mse", optimizer="adam")
3334
return model
3435

3536

3637
def _get_discriminator(input_shape=(10*2,)):
3738
model = Sequential()
39+
model.add(Input(shape=input_shape))
3840
model.add(Dense(10,
39-
input_shape=input_shape,
4041
kernel_initializer=GlorotUniform(seed=0),
4142
activation="relu"))
4243
model.add(Dense(1, activation="sigmoid", kernel_initializer=GlorotUniform(seed=0)))
@@ -46,9 +47,9 @@ def _get_discriminator(input_shape=(10*2,)):
4647

4748
def _get_task(input_shape=(10,)):
4849
model = Sequential()
50+
model.add(Input(shape=input_shape))
4951
model.add(Dense(2,
5052
kernel_initializer=GlorotUniform(seed=0),
51-
input_shape=input_shape,
5253
activation="softmax"))
5354
model.compile(loss="mse", optimizer=Adam(0.1))
5455
return model

tests/test_dann.py

+7-9
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,8 @@
66
import numpy as np
77
import tensorflow as tf
88
from tensorflow.keras import Sequential, Model
9-
from tensorflow.keras.layers import Dense
10-
try:
11-
from tensorflow.keras.optimizers import Adam, SGD
12-
except:
13-
from tensorflow.keras.optimizers.legacy import Adam, SGD
9+
from tensorflow.keras.layers import Dense, Input
10+
from tensorflow.keras.optimizers import Adam, SGD
1411

1512
from adapt.feature_based import DANN
1613
from adapt.utils import UpdateLambda
@@ -30,7 +27,8 @@
3027

3128
def _get_encoder(input_shape=Xs.shape[1:]):
3229
model = Sequential()
33-
model.add(Dense(1, input_shape=input_shape,
30+
model.add(Input(shape=input_shape))
31+
model.add(Dense(1,
3432
kernel_initializer="ones",
3533
use_bias=False))
3634
model.compile(loss="mse", optimizer="adam")
@@ -39,8 +37,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
3937

4038
def _get_discriminator(input_shape=(1,)):
4139
model = Sequential()
40+
model.add(Input(shape=input_shape))
4241
model.add(Dense(10,
43-
input_shape=input_shape,
4442
kernel_initializer=GlorotUniform(seed=0),
4543
activation="elu"))
4644
model.add(Dense(1,
@@ -52,10 +50,10 @@ def _get_discriminator(input_shape=(1,)):
5250

5351
def _get_task(input_shape=(1,), output_shape=(1,)):
5452
model = Sequential()
53+
model.add(Input(shape=input_shape))
5554
model.add(Dense(np.prod(output_shape),
5655
kernel_initializer=GlorotUniform(seed=0),
57-
use_bias=False,
58-
input_shape=input_shape))
56+
use_bias=False))
5957
model.compile(loss="mse", optimizer=Adam(0.1))
6058
return model
6159

tests/test_mcd.py

+6-5
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
import numpy as np
66
import tensorflow as tf
77
from tensorflow.keras import Sequential, Model
8-
from tensorflow.keras.layers import Dense
8+
from tensorflow.keras.layers import Dense, Input
99
from tensorflow.keras.optimizers import Adam
1010
from tensorflow.keras.initializers import GlorotUniform
1111

@@ -25,7 +25,8 @@
2525

2626
def _get_encoder(input_shape=Xs.shape[1:]):
2727
model = Sequential()
28-
model.add(Dense(1, input_shape=input_shape,
28+
model.add(Input(shape=input_shape))
29+
model.add(Dense(1,
2930
kernel_initializer="ones",
3031
use_bias=False))
3132
model.compile(loss="mse", optimizer="adam")
@@ -34,8 +35,8 @@ def _get_encoder(input_shape=Xs.shape[1:]):
3435

3536
def _get_discriminator(input_shape=(1,)):
3637
model = Sequential()
38+
model.add(Input(shape=input_shape))
3739
model.add(Dense(10,
38-
input_shape=input_shape,
3940
kernel_initializer=GlorotUniform(seed=0),
4041
activation="relu"))
4142
model.add(Dense(1,
@@ -47,10 +48,10 @@ def _get_discriminator(input_shape=(1,)):
4748

4849
def _get_task(input_shape=(1,), output_shape=(1,)):
4950
model = Sequential()
51+
model.add(Input(shape=input_shape))
5052
model.add(Dense(np.prod(output_shape),
5153
kernel_initializer=GlorotUniform(seed=0),
52-
use_bias=False,
53-
input_shape=input_shape))
54+
use_bias=False))
5455
model.compile(loss="mse", optimizer=Adam(0.1))
5556
return model
5657

tests/test_mdd.py

+6-6
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
import numpy as np
66
import tensorflow as tf
77
from tensorflow.keras import Sequential, Model
8-
from tensorflow.keras.layers import Dense
8+
from tensorflow.keras.layers import Dense, Input
99
from tensorflow.keras.optimizers import Adam
1010
from tensorflow.keras.initializers import GlorotUniform
1111

@@ -25,17 +25,17 @@
2525

2626
def _get_encoder(input_shape=Xs.shape[1:]):
2727
model = Sequential()
28-
model.add(Dense(1, input_shape=input_shape,
29-
kernel_initializer="ones",
28+
model.add(Input(shape=input_shape))
29+
model.add(Dense(1, kernel_initializer="ones",
3030
use_bias=False))
3131
model.compile(loss="mse", optimizer="adam")
3232
return model
3333

3434

3535
def _get_discriminator(input_shape=(1,)):
3636
model = Sequential()
37+
model.add(Input(shape=input_shape))
3738
model.add(Dense(10,
38-
input_shape=input_shape,
3939
kernel_initializer=GlorotUniform(seed=0),
4040
activation="relu"))
4141
model.add(Dense(1,
@@ -47,10 +47,10 @@ def _get_discriminator(input_shape=(1,)):
4747

4848
def _get_task(input_shape=(1,), output_shape=(1,)):
4949
model = Sequential()
50+
model.add(Input(shape=input_shape))
5051
model.add(Dense(np.prod(output_shape),
5152
use_bias=False,
52-
kernel_initializer=GlorotUniform(seed=0),
53-
input_shape=input_shape))
53+
kernel_initializer=GlorotUniform(seed=0)))
5454
model.compile(loss="mse", optimizer=Adam(0.1))
5555
return model
5656

tests/test_regular.py

+9-6
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from sklearn.base import clone
1111
import tensorflow as tf
1212
from tensorflow.keras import Sequential, Model
13-
from tensorflow.keras.layers import Dense
13+
from tensorflow.keras.layers import Dense, Input
1414
from tensorflow.keras.optimizers import Adam
1515
from tensorflow.keras.initializers import GlorotUniform
1616

@@ -40,10 +40,10 @@
4040

4141
def _get_network(input_shape=(1,), output_shape=(1,)):
4242
model = Sequential()
43+
model.add(Input(shape=input_shape))
4344
model.add(Dense(np.prod(output_shape),
44-
input_shape=input_shape,
4545
kernel_initializer=GlorotUniform(seed=0),
46-
use_bias=False))
46+
use_bias=True))
4747
model.compile(loss="mse", optimizer=Adam(0.1))
4848
return model
4949

@@ -150,17 +150,20 @@ def test_regularnn_fit():
150150
tf.random.set_seed(0)
151151
np.random.seed(0)
152152
network = _get_network()
153+
print(network.get_weights())
153154
network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
154-
model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1))
155+
print(network.get_weights())
156+
model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1), loss="mse")
155157
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
158+
print(model.task_.get_weights())
156159
# assert np.abs(network.predict(Xs) - ys_reg).sum() < 1
157-
assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) > 4.
160+
assert np.sum(np.abs(network.get_weights()[0] - model.task_.get_weights()[0])) > 4.
158161
assert np.abs(model.predict(Xt) - yt_reg).sum() < 10
159162

160163
model = RegularTransferNN(network, lambdas=10000000., optimizer=Adam(0.1))
161164
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
162165

163-
assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
166+
assert np.sum(np.abs(network.get_weights()[0] - model.task_.get_weights()[0])) < 0.001
164167
assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
165168

166169

0 commit comments

Comments
 (0)