Question
Can somebody explain how TensorFlow's eager mode works? I am trying to build a simple regression as follows:
import tensorflow as tf
tfe = tf.contrib.eager
tf.enable_eager_execution()
import numpy as np

def make_model():
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Dense(4, activation='relu'))
    net.add(tf.keras.layers.Dense(1))
    return net

def compute_loss(pred, actual):
    return tf.reduce_mean(tf.square(tf.subtract(pred, actual)))

def compute_gradient(model, pred, actual):
    """compute gradients with given noise and input"""
    with tf.GradientTape() as tape:
        loss = compute_loss(pred, actual)
    grads = tape.gradient(loss, model.variables)
    return grads, loss

def apply_gradients(optimizer, grads, model_vars):
    optimizer.apply_gradients(zip(grads, model_vars))

model = make_model()
optimizer = tf.train.AdamOptimizer(1e-4)

x = np.linspace(0, 1, 1000)
y = x + np.random.normal(0, 0.3, 1000)
y = y.astype('float32')
train_dataset = tf.data.Dataset.from_tensor_slices((y.reshape(-1, 1)))

epochs = 2  # 10
batch_size = 25
itr = y.shape[0] // batch_size

for epoch in range(epochs):
    for data in tf.contrib.eager.Iterator(train_dataset.batch(25)):
        preds = model(data)
        grads, loss = compute_gradient(model, preds, data)
        print(grads)
        apply_gradients(optimizer, grads, model.variables)
        # with tf.GradientTape() as tape:
        #     loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(preds, data))))
        # grads = tape.gradient(loss, model.variables)
        # print(grads)
        # optimizer.apply_gradients(zip(grads, model.variables), global_step=None)
Gradient output: [None, None, None, None, None, None]
The error is the following:
----------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-3-a589b9123c80> in <module>
35 grads, loss = compute_gradient(model, preds, data)
36 print(grads)
---> 37 apply_gradients(optimizer, grads, model.variables)
38 # with tf.GradientTape() as tape:
39 # loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(preds, data))))
<ipython-input-3-a589b9123c80> in apply_gradients(optimizer, grads, model_vars)
17
18 def apply_gradients(optimizer, grads, model_vars):
---> 19 optimizer.apply_gradients(zip(grads, model_vars))
20
21 model = make_model()
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/training/optimizer.py in apply_gradients(self, grads_and_vars, global_step, name)
589 if not var_list:
590 raise ValueError("No gradients provided for any variable: %s." %
--> 591 ([str(v) for _, v, _ in converted_grads_and_vars],))
592 with ops.init_scope():
593 self._create_slots(var_list)
ValueError: No gradients provided for any variable:
Edit
I updated my code. Now the problem is in the gradient calculation: it returns None for every variable, even though I have checked that the loss value is non-zero.
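For context (an illustrative sketch, not part of the original post): tape.gradient returns None for any variable the tape never saw being used, which is exactly what happens when the forward pass runs outside the with tf.GradientTape() block:

import tensorflow as tf
tf.enable_eager_execution()

v = tf.contrib.eager.Variable(3.0)
pred = v * 2.0  # forward pass happens OUTSIDE the tape
with tf.GradientTape() as tape:
    loss = tf.square(pred - 1.0)  # the tape only records from here on
print(tape.gradient(loss, [v]))  # [None] -- v was never recorded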
Answer 1:
Part 1: The problem is indeed the datatype of your input. By default your Keras model expects float32, but you are passing float64. You can either change the dtype of the model or cast the input to float32.
To change your model:
def make_model():
    net = tf.keras.Sequential()
    net.add(tf.keras.layers.Dense(4, activation='relu', dtype='float32'))
    net.add(tf.keras.layers.Dense(4, activation='relu'))
    net.add(tf.keras.layers.Dense(1))
    return net
To change your input:
y = y.astype('float32')
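As a quick way to see the mismatch (an illustrative check, not from the original answer): NumPy's default floating-point dtype is float64, so the arrays built with linspace and random.normal come out as float64 unless cast:

import numpy as np
x = np.linspace(0, 1, 1000)
y = x + np.random.normal(0, 0.3, 1000)
print(y.dtype)                    # float64 -- NumPy's default
print(y.astype('float32').dtype)  # float32 -- what the Dense layers expect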
Part 2: You need to call the function that computes your model (i.e. model(data)) under tf.GradientTape(). For example, you can replace your compute_loss method with the following:
def compute_loss(model, x, y):
    pred = model(x)
    return tf.reduce_mean(tf.square(tf.subtract(pred, y)))
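Putting the two parts together, a minimal training step might look like the following (a sketch based on the question's code, not part of the original answer; as in the question, the dataset yields only y, so each batch serves as both input and target):

def compute_gradient(model, x, y):
    with tf.GradientTape() as tape:
        loss = compute_loss(model, x, y)  # forward pass now runs inside the tape
    grads = tape.gradient(loss, model.variables)
    return grads, loss

for epoch in range(epochs):
    for data in tf.contrib.eager.Iterator(train_dataset.batch(batch_size)):
        grads, loss = compute_gradient(model, data, data)
        optimizer.apply_gradients(zip(grads, model.variables))

With the float32 cast from Part 1 applied as well, the gradients should come back as real tensors instead of None.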
Source: https://stackoverflow.com/questions/54255431/invalidargumenterror-cannot-compute-matmul-as-input-0zero-based-was-expected