Question
I am getting an error when I try to convert my model to use a TensorFlow Estimator, and I think it's because my_model
does not have an active session in place. So should TensorFlow operations be conducted outside of my_model?
For example, I am getting an error with the way I currently define it:
import math
import numpy as np
import tensorflow as tf

def my_model(features, labels, mode, params):

    train_dataset = features
    train_labels = labels

    batch_sizeE = params["batch_size"]
    embedding_sizeE = params["embedding_size"]
    num_inputsE = params["num_inputs"]
    num_sampledE = params["num_sampled"]

    print(features)
    print(labels)

    # store the epoch count so the total number of epochs is known
    epochCount = tf.get_variable('epochCount', initializer=0)
    update_epoch = tf.assign(epochCount, epochCount + 1)

    embeddings = tf.get_variable(
        'embeddings', dtype=tf.float32,
        initializer=tf.random_uniform([vocabulary_size, embedding_sizeE], -1.0, 1.0, dtype=tf.float32))

    softmax_weights = tf.get_variable(
        'softmax_weights', dtype=tf.float32,
        initializer=tf.truncated_normal([vocabulary_size, embedding_sizeE],
                                        stddev=1.0 / math.sqrt(embedding_sizeE), dtype=tf.float32))

    softmax_biases = tf.get_variable(
        'softmax_biases', dtype=tf.float32,
        initializer=tf.zeros([vocabulary_size], dtype=tf.float32), trainable=False)

    embed = tf.nn.embedding_lookup(embeddings, train_dataset)  # train data set is
    embed_reshaped = tf.reshape(embed, [batch_sizeE * num_inputs, embedding_sizeE])

    segments = np.arange(batch_size).repeat(num_inputs)
    averaged_embeds = tf.segment_mean(embed_reshaped, segments, name=None)

    if mode == "train":
        sSML = tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=averaged_embeds,
                                          labels=train_labels, num_sampled=64, num_classes=3096637)
        loss = tf.reduce_mean(sSML)
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=optimizer)
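For context, here is roughly how I wire my_model into an Estimator and call train. This is only a sketch: the train() call matches the traceback below, but the params values shown here are placeholders rather than my actual settings.

word2vecEstimator = tf.estimator.Estimator(
    model_fn=my_model,
    params={
        "batch_size": 128,       # placeholder value
        "embedding_size": 50,    # matches the (3096637, 50) shapes printed below
        "num_inputs": 2,         # placeholder value
        "num_sampled": 64,       # placeholder value
    })

word2vecEstimator.train(
    input_fn=generate_batch,
    steps=10)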
The error is at the sSML loss function. Here is the error:
INFO:tensorflow:Calling model_fn.
<tf.Variable 'softmax_weights:0' shape=(3096637, 50) dtype=float32_ref>
<tf.Variable 'softmax_biases:0' shape=(3096637,) dtype=float32_ref>
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-49-955f44867ee5> in <module>()
1 word2vecEstimator.train(
2 input_fn=generate_batch,
----> 3 steps=10)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/estimator/estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
352
353 saving_listeners = _check_listeners_type(saving_listeners)
--> 354 loss = self._train_model(input_fn, hooks, saving_listeners)
355 logging.info('Loss for final step: %s.', loss)
356 return self
/usr/local/lib/python3.6/dist-packages/tensorflow/python/estimator/estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
1205 return self._train_model_distributed(input_fn, hooks, saving_listeners)
1206 else:
-> 1207 return self._train_model_default(input_fn, hooks, saving_listeners)
1208
1209 def _train_model_default(self, input_fn, hooks, saving_listeners):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/estimator/estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
1235 worker_hooks.extend(input_hooks)
1236 estimator_spec = self._call_model_fn(
-> 1237 features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
1238 global_step_tensor = training_util.get_global_step(g)
1239 return self._train_with_estimator_spec(estimator_spec, worker_hooks,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/estimator/estimator.py in _call_model_fn(self, features, labels, mode, config)
1193
1194 logging.info('Calling model_fn.')
-> 1195 model_fn_results = self._model_fn(features=features, **kwargs)
1196 logging.info('Done calling model_fn.')
1197
<ipython-input-47-95d390a50046> in my_model(features, labels, mode, params)
47
48 sSML = tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=averaged_embeds,
---> 49 labels=train_labels, num_sampled=64, num_classes=3096637)
50
51 loss = tf.reduce_mean( sSML )
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py in sampled_softmax_loss(weights, biases, labels, inputs, num_sampled, num_classes, num_true, sampled_values, remove_accidental_hits, partition_strategy, name, seed)
1347 partition_strategy=partition_strategy,
1348 name=name,
-> 1349 seed=seed)
1350 labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
1351 sampled_losses = nn_ops.softmax_cross_entropy_with_logits_v2(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py in _compute_sampled_logits(weights, biases, labels, inputs, num_sampled, num_classes, num_true, sampled_values, subtract_log_q, remove_accidental_hits, partition_strategy, name, seed)
1029 with ops.name_scope(name, "compute_sampled_logits",
1030 weights + [biases, inputs, labels]):
-> 1031 if labels.dtype != dtypes.int64:
1032 labels = math_ops.cast(labels, dtypes.int64)
1033 labels_flat = array_ops.reshape(labels, [-1])
TypeError: data type not understood
I wondered what was causing the error, so I tried to print out my inputs to the sampled softmax loss and got this error:
ValueError: Cannot evaluate tensor using `eval()`: No default session is registered. Use `with sess.as_default()` or pass an explicit session to `eval(session=sess)`
So it seems that there is no active graph/session being run?
Here's a link to my full code
https://colab.research.google.com/drive/1LH343QcKknMeUByjqifZPp2Hepfypz-L
Here is a link to the original question this is based on.
Answer 1:
eval() is meant to be used with an interactive Session. If you use eval(), you need to create a Session yourself. In an Estimator, however, tf.Estimator creates the session underneath for you, and that session is not interactive. Even eager mode is not supported. The following lines in your model are what trigger the error:
print(softmax_weights.eval())
print(softmax_biases.eval())
print(embeddings.eval())
print(averaged_embeds.eval())
Removing these lines would help.
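If you still want to inspect those values during training, here is a rough sketch of two options that do work inside an Estimator (assuming TF 1.x; the tensor names below are just the ones from your my_model): wrap a tensor in tf.Print, or attach a tf.train.LoggingTensorHook to the EstimatorSpec.

    # Option 1 (sketch): tf.Print logs the tensor's values whenever the graph runs it
    averaged_embeds = tf.Print(averaged_embeds, [averaged_embeds],
                               message="averaged_embeds: ", summarize=10)

    # Option 2 (sketch): log named tensors every N training steps via a hook
    logging_hook = tf.train.LoggingTensorHook(
        {"loss": loss, "softmax_biases": softmax_biases}, every_n_iter=10)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=optimizer,
                                      training_hooks=[logging_hook])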
Source: https://stackoverflow.com/questions/53575529/question-on-tensorflow-estimator-practices-should-tensorflow-operations-be-cond