Question
I am new to TensorFlow, and I think I have the right approach, but I am missing something minimal that I can't find online. I hope someone can send me a reference or point me to what I am missing.
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
batch_size = 128
test_size = 256
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
def model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden):
    l1a = tf.nn.relu(tf.nn.conv2d(X, w,                      # l1a shape=(?, 28, 28, 32)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l1 = tf.nn.max_pool(l1a, ksize=[1, 2, 2, 1],             # l1 shape=(?, 14, 14, 32)
                        strides=[1, 2, 2, 1], padding='SAME')
    l1 = tf.nn.dropout(l1, p_keep_conv)

    l2a = tf.nn.relu(tf.nn.conv2d(l1, w2,                    # l2a shape=(?, 14, 14, 64)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l2 = tf.nn.max_pool(l2a, ksize=[1, 2, 2, 1],             # l2 shape=(?, 7, 7, 64)
                        strides=[1, 2, 2, 1], padding='SAME')
    l2 = tf.nn.dropout(l2, p_keep_conv)

    l3a = tf.nn.relu(tf.nn.conv2d(l2, w3,                    # l3a shape=(?, 7, 7, 128)
                                  strides=[1, 1, 1, 1], padding='SAME'))
    l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1],             # l3 shape=(?, 4, 4, 128)
                        strides=[1, 2, 2, 1], padding='SAME')
    l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])   # reshape to (?, 2048)
    l3 = tf.nn.dropout(l3, p_keep_conv)

    l4 = tf.nn.relu(tf.matmul(l3, w4))
    l4 = tf.nn.dropout(l4, p_keep_hidden)

    pyx = tf.matmul(l4, w_o)
    return pyx
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX = trX.reshape(-1, 28, 28, 1) # 28x28x1 input img
teX = teX.reshape(-1, 28, 28, 1) # 28x28x1 input img
X = tf.placeholder("float", [None, 28, 28, 1])
Y = tf.placeholder("float", [None, 10])
w = init_weights([3, 3, 1, 32]) # 3x3x1 conv, 32 outputs
w2 = init_weights([3, 3, 32, 64]) # 3x3x32 conv, 64 outputs
w3 = init_weights([3, 3, 64, 128]) # 3x3x64 conv, 128 outputs
w4 = init_weights([128 * 4 * 4, 625]) # FC 128 * 4 * 4 inputs, 625 outputs
w_o = init_weights([625, 10]) # FC 625 inputs, 10 outputs (labels)
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w, w2, w3, w4, w_o, p_keep_conv, p_keep_hidden)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)
# Launch the graph in a session
saver = tf.train.Saver()
with tf.Session() as sess:
    # you need to initialize all variables
    tf.global_variables_initializer().run()

    for i in range(100):
        training_batch = zip(range(0, len(trX), batch_size),
                             range(batch_size, len(trX)+1, batch_size))
        for start, end in training_batch:
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_conv: 0.8, p_keep_hidden: 0.5})

        test_indices = np.arange(len(teX))  # Get A Test Batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(i, np.mean(np.argmax(teY[test_indices], axis=1) ==
                         sess.run(predict_op, feed_dict={X: teX[test_indices],
                                                         Y: teY[test_indices],
                                                         p_keep_conv: 1.0,
                                                         p_keep_hidden: 1.0})))

    save_path = saver.save(sess, "tmp/model.ckpt")
    print("Model saved in file: %s" % save_path)
After all of this, I am now trying to predict a single image from this array, just as an example (I know it's not a proper test), to get its class:
with tf.Session() as sess:
    # Restore variables from disk.
    saver.restore(sess, "tmp/model.ckpt")
    print("...Model Loaded...")
    prediction = tf.argmax(predict_op, 1)
    print(prediction.eval(feed_dict={X: teX[2].reshape(1, 28, 28, 1)}, session=sess))
But I'm getting this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_3' with dtype
This first problem was solved by adding p_keep_conv: 1.0, p_keep_hidden: 1.0 to the feed dict.
After this, another issue appeared:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-91-3e9ead14a8b3> in <module>()
4 print "...Model Loaded..."
5 prediction=tf.argmax(predict_op,1)
----> 6 classification = sess.run(tf.argmax(predict_op, 1), feed_dict={X: teX[3].reshape(1,28,28,1),p_keep_conv: 1.0,p_keep_hidden: 1.0})
7
....
InvalidArgumentError: Expected dimension in the range [-1, 1), but got 1
[[Node: ArgMax_21 = ArgMax[T=DT_INT64, Tidx=DT_INT32, output_type=DT_INT64, _device="/job:localhost/replica:0/task:0/device:CPU:0"](ArgMax/_37, ArgMax_21/dimension)]]
Answer 1:
I'm summing up what we said in the comments in this answer.
Placeholder error:
Your prediction.eval() call has a feed_dict that doesn't contain a value for p_keep_conv and p_keep_hidden. Note that, since you don't provide a name=... argument when defining your placeholders, they get the default name Placeholder_N, which is what the error message shows. It's good practice to always give a meaningful name to variables, constants and placeholders in order to make debugging easier.
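For example, something along these lines (just a sketch: it reuses X, teX, saver and predict_op from your code, and the name=... values are only illustrative; the named placeholders would replace the unnamed ones you define before building the model):
# Named placeholders make "You must feed a value for placeholder tensor
# 'Placeholder_3'" point at something recognizable.
p_keep_conv = tf.placeholder("float", name="p_keep_conv")
p_keep_hidden = tf.placeholder("float", name="p_keep_hidden")

with tf.Session() as sess:
    saver.restore(sess, "tmp/model.ckpt")
    # Every placeholder the prediction depends on must be fed, including the
    # dropout keep probabilities (1.0 disables dropout at inference time).
    print(sess.run(predict_op, feed_dict={X: teX[2].reshape(1, 28, 28, 1),
                                          p_keep_conv: 1.0,
                                          p_keep_hidden: 1.0}))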
Argmax expected dimension:
tf.argmax's documentation states:
axis: A Tensor. Must be one of the following types: int32, int64. int32, 0 <= axis < rank(input). Describes which axis of the input Tensor to reduce across.
In your case, predict_op is already tf.argmax(py_x, 1) and is therefore a rank-1 tensor, so passing axis=1 to a second tf.argmax is out of range, which is exactly what the "Expected dimension in the range [-1, 1), but got 1" message says. It seems, then, that the only way to run argmax on the last axis of the tensor is by giving it axis=-1, because of the "strictly less than" sign in the definition of the function (I don't understand why they made this design choice).
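As a rough sketch of the fix (reusing the tensors defined in your code): predict_op already yields one class index per image, so you can evaluate it directly instead of wrapping it in another argmax:
feed = {X: teX[3].reshape(1, 28, 28, 1),
        p_keep_conv: 1.0, p_keep_hidden: 1.0}

with tf.Session() as sess:
    saver.restore(sess, "tmp/model.ckpt")
    # predict_op = tf.argmax(py_x, 1) already has shape (?,): one class
    # index per image, so no second argmax is needed. Wrapping it in
    # another tf.argmax would only accept axis -1 (or 0), never 1,
    # because predict_op is a rank-1 tensor.
    print(sess.run(predict_op, feed_dict=feed))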
Source: https://stackoverflow.com/questions/47390035/how-to-predict-a-specific-image-using-mnist