Question
I built a 1D CNN autoencoder in Keras, following the advice in this SO question, where the encoder and decoder are kept as separate models. My goal is to re-use the decoder once the autoencoder has been trained. The central layer of my autoencoder is a Dense layer, because I would like to learn it afterwards.
My problem is that if I compile and fit the whole autoencoder, written as Decoder()(Encoder()(x)) where x is the input, then autoencoder.predict(training_set) gives a different prediction than first encoding the training set into the central features and then letting the decoder decode them. These two approaches should give identical answers once the autoencoder has been trained.
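Concretely, this is the equality I expect to hold after training (a minimal sketch, using the autoencoder, encoder and decoder models defined in the code below):
# expected to hold once the autoencoder is trained:
features = encoder.predict(training)          # (1500, 32) latent codes
score = decoder.predict(features)             # decode the latent codes
predictions = autoencoder.predict(training)   # end-to-end reconstruction
print(np.allclose(score, predictions))        # I expect True, but I get False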
from tensorflow.keras.layers import Input, Dense, BatchNormalization, Flatten, Lambda, Activation, Conv1D, MaxPooling1D, UpSampling1D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers
from tensorflow.keras.layers import GaussianNoise
import keras.backend as K
from tensorflow.keras.layers import Add
import tensorflow as tf
import scipy.io
import sys
import random   # used by reset_seeds() below
import h5py     # used by load_weights() below
import matplotlib.pyplot as plt
import numpy as np
import copy
training = # some training set, 1500 samples of 501 points each
testing = # some testing set, 500 samples of 501 points each
# reshaping for CNN
training = np.reshape(training, [1500, 501, 1])
testing = np.reshape(testing, [500, 501, 1])
# normalize input
X_mean = training.mean()
training -= X_mean
X_std = training.std()
training /= X_std
copy_of_test = copy.copy(testing)
testing -= X_mean
testing /= X_std
### MODEL ###
def Encoder():
encoder_input = Input(batch_shape=(None, 501, 1))
e1 = Conv1D(256,3, activation='tanh', padding='valid')(encoder_input)
e2 = MaxPooling1D(2)(e1)
e3 = Conv1D(32,3, activation='tanh', padding='valid')(e2)
e4 = MaxPooling1D(2)(e3)
e5 = Flatten()(e4)
encoded = Dense(32,activation = 'tanh')(e5)
return Model(encoder_input, encoded)
def Decoder():
encoded_input = Input(shape=(32,))
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoded_input, decoded)
# define input to the model:
x = Input(batch_shape=(None, 501, 1))
y = Input(shape=(32,))
# make the model:
autoencoder = Model(x, Decoder()(Encoder()(x)))
# compile the model:
autoencoder.compile(optimizer='adam', loss='mse')
for layer in autoencoder.layers: print(K.int_shape(layer.output))
epochs = 100
batch_size = 100
validation_split = 0.2
# train the model
history = autoencoder.fit(x = training, y = training,
epochs=epochs,
batch_size=batch_size,
validation_split=validation_split)
# Encoder
encoder = Model(inputs=x, outputs=Encoder()(x), name='encoder')
print('enc:')
for layer in encoder.layers: print(K.int_shape(layer.output))
features = encoder.predict(training) # features
# Decoder
decoder = Model(inputs=y, outputs=Decoder()(y), name='decoder')
print('dec:')
for layer in decoder.layers: print(K.int_shape(layer.output))
score = decoder.predict(features) #
score = np.squeeze(score)
predictions = autoencoder.predict(training)
predictions = np.squeeze(predictions)
# plotting one random case
# score should be equal to predictions!
# because score is obtained from the trained decoder acting on the encoded features, while predictions are obtained from the Autoencoder acting on the training set
plt.plot(score[100], label='eD')
plt.plot(predictions[100], label='AE')
plt.legend()
plt.show()
plt.close()
EDIT following OverLordGoldDragon's answer:
I implemented the suggestion in the answer, writing the following in the same file:
def reset_seeds():
np.random.seed(1)
random.seed(2)
if tf.__version__[0] == '2':
tf.random.set_seed(3)
else:
tf.set_random_seed(3)
print("RANDOM SEEDS RESET")
def Encoder():
encoder_input = Input(batch_shape=(None, 501, 1))
e1 = Conv1D(256,3, activation='tanh', padding='valid')(encoder_input)
e2 = MaxPooling1D(2)(e1)
e3 = Conv1D(32,3, activation='tanh', padding='valid')(e2)
e4 = MaxPooling1D(2)(e3)
e5 = Flatten()(e4)
encoded = Dense(32,activation = 'tanh')(e5)
encoded = Reshape((32,1))(encoded)
return Model(encoder_input, encoded)
def Decoder():
encoded_input = Input(shape=(32,))
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoded_input, decoded)
def DecoderAE(encoder_input, encoded_input):
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid',
name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoder_input, decoded)
def load_weights(model, filepath):
with h5py.File(filepath, mode='r') as f:
file_layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
model_layer_names = [layer.name for layer in model.layers]
weight_values_to_load = []
for name in file_layer_names:
if name not in model_layer_names:
print(name, "is ignored; skipping")
continue
g = f[name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = []
if len(weight_names) != 0:
weight_values = [g[weight_name] for weight_name in weight_names]
try:
layer = model.get_layer(name=name)
except:
layer = None
if layer is not None:
symbolic_weights = (layer.trainable_weights +
layer.non_trainable_weights)
if len(symbolic_weights) != len(weight_values):
print('Model & file weights shapes mismatch')
else:
weight_values_to_load += zip(symbolic_weights, weight_values)
K.batch_set_value(weight_values_to_load)
X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')
epochs = 10
batch_size = 100
validation_split = 0.2
# train the model
history = AE.fit(x = training, y = training,
epochs=epochs,
batch_size=batch_size,
validation_split=validation_split)
reset_seeds()
encoder = Encoder()
decoder = Decoder()
# Test equality
features = encoder.predict(X)
features = np.squeeze(features) # had to add this otherwise it would complain because of wrong shapes
score = decoder.predict(features)
predictions = AE.predict(X)
print(np.sum(score - predictions))
# I am actually getting values >> 1
AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()
decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5') # see "reference"
decoder_loaded_weights = decoder.get_weights()
AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
print(np.sum(w1 - w2))
The code runs and trains the AE; however,
1) I get values >> 1 for the difference between score and predictions;
2) the code stops, producing:
(u'input_1', 'is ignored; skipping')
(u'conv1d', 'is ignored; skipping')
(u'max_pooling1d', 'is ignored; skipping')
(u'conv1d_1', 'is ignored; skipping')
(u'max_pooling1d_1', 'is ignored; skipping')
(u'flatten', 'is ignored; skipping')
(u'dense', 'is ignored; skipping')
(u'reshape', 'is ignored; skipping')
(u'reshape_1', 'is ignored; skipping')
Traceback (most recent call last):
File "Autoenc.py", line 256, in <module>
load_weights(decoder, 'autoencoder_weights.h5') # see "reference"
File "Autoenc.py", line 219, in load_weights
K.batch_set_value(weight_values_to_load)
File "/home/user/anaconda3/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 2725, in batch_set_value
assign_placeholder = tf.placeholder(tf_dtype,
AttributeError: 'module' object has no attribute 'placeholder'
EDIT 2
Here is my new file, following the last comment by @OverLordGoldDragon. I get the error posted below.
def reset_seeds():
np.random.seed(1)
random.seed(2)
if tf.__version__[0] == '2':
tf.random.set_seed(3)
else:
tf.set_random_seed(3)
print("RANDOM SEEDS RESET")
def Encoder():
encoder_input = Input(batch_shape=(None, 501, 1))
e1 = Conv1D(256,3, activation='tanh', padding='valid')(encoder_input)
e2 = MaxPooling1D(2)(e1)
e3 = Conv1D(32,3, activation='tanh', padding='valid')(e2)
e4 = MaxPooling1D(2)(e3)
e5 = Flatten()(e4)
encoded = Dense(32,activation = 'tanh')(e5)
encoded = Reshape((32,1))(encoded)
return Model(encoder_input, encoded)
def Decoder():
encoded_input = Input(shape=(32,))
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoded_input, decoded)
def DecoderAE(encoder_input, encoded_input):
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid',
name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoder_input, decoded)
def load_weights(model, filepath):
with h5py.File(filepath, mode='r') as f:
file_layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
model_layer_names = [layer.name for layer in model.layers]
weight_values_to_load = []
for name in file_layer_names:
if name not in model_layer_names:
print(name, "is ignored; skipping")
continue
g = f[name]
weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
weight_values = []
if len(weight_names) != 0:
weight_values = [g[weight_name] for weight_name in weight_names]
try:
layer = model.get_layer(name=name)
except:
layer = None
if layer is not None:
symbolic_weights = (layer.trainable_weights +
layer.non_trainable_weights)
if len(symbolic_weights) != len(weight_values):
print('Model & file weights shapes mismatch')
else:
weight_values_to_load += zip(symbolic_weights, weight_values)
K.batch_set_value(weight_values_to_load)
X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')
epochs = 2
batch_size = 100
validation_split = 0.2
# train the model
history = AE.fit(x = training, y = training,
epochs=epochs,
batch_size=batch_size,
validation_split=validation_split)
reset_seeds()
encoder = Encoder()
decoder = Decoder()
decoder.save_weights('decoder_weights.h5')
AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()
decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5') # see "reference"
decoder_loaded_weights = decoder.get_weights()
# Test equality
features = encoder.predict(X)
features = np.squeeze(features)
score = decoder.predict(features)
predictions = AE.predict(X)
print(np.sum(score - predictions))
AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
print(np.sum(w1 - w2))
Traceback (most recent call last):
File "Autoenc_pazzo.py", line 251, in <module>
decoder_loaded_weights = decoder.get_weights()
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/training.py", line 153, in get_weights
return super(Model, self).get_weights()
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1130, in get_weights
return backend.batch_get_value(params)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/backend.py", line 3010, in batch_get_value
return get_session(tensors).run(tensors)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1350, in _do_run
run_metadata)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable decod_conv1d_1_2/bias from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/decod_conv1d_1_2/bias/N10tensorflow3VarE does not exist.
[[node decod_conv1d_1_2/bias/Read/ReadVariableOp (defined at Autoenc_pazzo.py:168) ]]
Original stack trace for u'decod_conv1d_1_2/bias/Read/ReadVariableOp':
File "Autoenc_pazzo.py", line 249, in <module>
decoder = Decoder()
File "Autoenc_pazzo.py", line 168, in Decoder
d1 = Conv1D(32, 3, activation='tanh', padding='valid', name='decod_conv1d_1')(encoded_reshaped)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 591, in __call__
self._maybe_build(inputs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1881, in _maybe_build
self.build(input_shapes)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/layers/convolutional.py", line 174, in build
dtype=self.dtype)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 384, in add_weight
aggregation=aggregation)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/training/tracking/base.py", line 663, in _add_variable_with_custom_getter
**kwargs_for_getter)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 155, in make_variable
shape=variable_shape if variable_shape.rank else None)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 259, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 220, in _variable_v1_call
shape=shape)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 198, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 2495, in default_variable_creator
shape=shape)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/variables.py", line 263, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 460, in __init__
shape=shape)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 649, in _init_from_args
value = self._read_variable_op()
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 935, in _read_variable_op
self._dtype)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/ops/gen_resource_variable_ops.py", line 587, in read_variable_op
"ReadVariableOp", resource=resource, dtype=dtype, name=name)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3616, in create_op
op_def=op_def)
File "/home/alessio/anaconda3/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
Answer 1:
Main problem with your code: lack of a random seed. I refer to this answer for the full explanation, and focus on your particular instance here.
Explanation:
- Order of model instantiation matters, as it changes the initialized weights (see the small demo after this list)
- In practice, if you train multiple models, you should reset the seed both at model instantiation and before training
- Your AE, Encoder, and Decoder definitions make redundant use of Input and complicate introspection (e.g. .summary()); Encoder() and Decoder() already take care of it
- To check whether the loaded decoder weights match the saved trained AE decoder weights, see the example below
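As a small demo of the first two points (not from the original answer; a sketch assuming the question's Encoder() factory, the reset_seeds() helper listed under "Functions used" below, and TF 2.x behavior for exact reproducibility):
# each call to Encoder() builds a new model with freshly initialized weights
w_a = Encoder().get_weights()
w_b = Encoder().get_weights()
print(all(np.array_equal(a, b) for a, b in zip(w_a, w_b)))   # False: weights differ

# resetting the seeds right before each instantiation makes them match
reset_seeds()
w_a = Encoder().get_weights()
reset_seeds()
w_b = Encoder().get_weights()
print(all(np.array_equal(a, b) for a, b in zip(w_a, w_b)))   # True: weights identical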
SOLUTION:
reset_seeds()
X = np.random.randn(10, 501, 1) # '10' arbitrary
x = Input(batch_shape=(None, 501, 1))
reset_seeds()
encoder = Encoder()
decoder = Decoder()
autoencoder = Model(x, decoder(encoder(x)))
autoencoder.compile(optimizer='adam', loss='mse')
reset_seeds()
encoder = Encoder()
decoder = Decoder()
predictions = autoencoder.predict(X)
features = encoder.predict(X)
score = decoder.predict(features)
print(np.sum(score - predictions))
# 0.0 <-- 100% agreement
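For completeness, a pattern that avoids relying on seeds at all (not part of this answer, just a common alternative): build encoder and decoder once and compose the autoencoder from those same instances, so all three models literally share the same layers and therefore the same trained weights. A sketch, assuming the question's original Encoder()/Decoder() definitions (encoder output shape (None, 32) matching the decoder's Input(shape=(32,))):
encoder = Encoder()
decoder = Decoder()
x = Input(batch_shape=(None, 501, 1))
autoencoder = Model(x, decoder(encoder(x)))
autoencoder.compile(optimizer='adam', loss='mse')
autoencoder.fit(training, training, epochs=100, batch_size=100, validation_split=0.2)

# encoder and decoder now hold the trained weights; no seed bookkeeping needed
features = encoder.predict(training)
score = decoder.predict(features)
predictions = autoencoder.predict(training)
print(np.allclose(score, predictions))   # True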
Save/load example + preferred AE definition; reference
Your AE definition limits introspection via e.g. .summary(); instead, define it as below.
X = np.random.randn(10, 501, 1)
reset_seeds()
encoder = Encoder()
AE = DecoderAE(encoder.input, encoder.output)
AE.compile(optimizer='adam', loss='mse')
reset_seeds()
encoder = Encoder()
decoder = Decoder()
# Test equality
features = encoder.predict(X)
score = decoder.predict(features)
predictions = AE.predict(X)
print(np.sum(score - predictions))
# 0.0 <-- exact or close to
AE.save_weights('autoencoder_weights.h5')
AE_saved_weights = AE.get_weights()
decoder = Decoder()
load_weights(decoder, 'autoencoder_weights.h5') # see "reference"
decoder_loaded_weights = decoder.get_weights()
AE_decoder_weights = AE_saved_weights[-len(decoder_loaded_weights):]
for w1, w2 in zip(AE_decoder_weights, decoder_loaded_weights):
print(np.sum(w1 - w2))
# 0.0
# 0.0
# ...
Functions used:
def reset_seeds():
np.random.seed(1)
random.seed(2)
if tf.__version__[0] == '2':
tf.random.set_seed(3)
else:
tf.set_random_seed(3)
print("RANDOM SEEDS RESET")
def DecoderAE(encoder_input, encoded_input):
encoded_reshaped = Reshape((32,1))(encoded_input)
d1 = Conv1D(32, 3, activation='tanh', padding='valid',
name='decod_conv1d_1')(encoded_reshaped)
d2 = UpSampling1D(2, name='decod_upsampling1d_1')(d1)
d3 = Conv1D(256, 3, activation='tanh', padding='valid', name='decod_conv1d_2')(d2)
d4 = UpSampling1D(2, name='decod_upsampling1d_2')(d3)
d5 = Flatten(name='decod_flatten')(d4)
d6 = Dense(501, name='decod_dense1')(d5)
decoded = Reshape((501,1), name='decod_reshape')(d6)
return Model(encoder_input, decoded)
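As a side note, since the decoder layers are explicitly named (decod_*), Keras's built-in name-based weight loading may work in place of the custom load_weights above; behavior can vary across Keras/TF versions, so treat this as a sketch:
# name-based loading with the built-in API
AE.save_weights('autoencoder_weights.h5')
decoder = Decoder()
decoder.load_weights('autoencoder_weights.h5', by_name=True)   # only layers whose names match ('decod_*') are loaded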
Source: https://stackoverflow.com/questions/58367519/why-do-predictions-differ-for-autoencoder-vs-encoder-decoder