tf.constant and tf.placeholder behave differently

Question


I want to wrap tf.metrics in a Sonnet module to measure the performance of each batch. Here is what I have done:

import tensorflow as tf
import sonnet as snt

class Metrics(snt.AbstractModule):
    def __init__(self, indicator, summaries = None, name = "metrics"):
        super(Metrics, self).__init__(name = name)
        self._indicator = indicator
        self._summaries = summaries

    def _build(self, labels, logits):
        if self._indicator == "accuracy":
            metric, metric_update = tf.metrics.accuracy(labels, logits)
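            # run the streaming update op before reading the metric value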
            with tf.control_dependencies([metric_update]):
                outputs = tf.identity(metric)
        elif self._indicator == "precision":
            metric, metric_update = tf.metrics.precision(labels, logits)
            with tf.control_dependencies([metric_update]):
                outputs = tf.identity(metric)
        elif self._indicator == "recall":
            metric, metric_update = tf.metrics.recall(labels, logits)
            with tf.control_dependencies([metric_update]):
                outputs = tf.identity(metric)
        elif self._indicator == "f1_score":
            metric_recall, metric_update_recall = tf.metrics.recall(labels, logits)
            metric_precision, metric_update_precision = tf.metrics.precision(labels, logits)
            with tf.control_dependencies([metric_update_recall, metric_update_precision]):
                outputs = 2.0 / (1.0 / metric_recall + 1.0 / metric_precision)
        else:
            raise ValueError("unsupported metrics")

        if isinstance(self._summaries, list):
            self._summaries.append(tf.summary.scalar(self._indicator, outputs))

        return outputs
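
For reference, the f1_score branch is just the harmonic mean of precision and recall. A quick standalone check, with arbitrarily chosen example values:

precision, recall = 0.75, 0.6                # arbitrary example values
f1 = 2.0 / (1.0 / recall + 1.0 / precision)  # harmonic mean, as in _build above
print(f1)                                    # 0.666..., equal to 2*p*r/(p+r)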

When I test the module, the following code works:

def test3():

    labels = tf.constant([1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], tf.int32)
    logits = tf.constant([1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], tf.int32)

    metrics = Metrics("accuracy")
    accuracy = metrics(labels, logits)

    metrics2 = Metrics("f1_score")
    f1_score = metrics2(labels, logits)

    writer = tf.summary.FileWriter("utils-const", tf.get_default_graph())
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

        accu, f1 = sess.run([accuracy, f1_score])
        print(accu)
        print(f1)

    writer.close()

However, the following code does NOT work:

def test4():
    from tensorflow.python import debug as tf_debug
    import numpy as np

    tf_labels = tf.placeholder(dtype=tf.int32, shape=[None])
    tf_logits = tf.placeholder(dtype=tf.int32, shape=[None])

    labels = np.array([1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], np.int32)
    logits = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], np.int32)

    metrics = Metrics("accuracy")
    accuracy = metrics(tf_labels, tf_logits)

    metrics2 = Metrics("f1_score")
    f1_score = metrics2(tf_labels, tf_logits)

    writer = tf.summary.FileWriter("utils-feed", tf.get_default_graph())
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

        sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        accu, f1 = sess.run([accuracy, f1_score], feed_dict = {tf_labels: labels, tf_logits: logits})
        print(accu)
        print(f1)

    writer.close()

The output of test3() is correct: 0.88 (22 of the 25 predictions match the labels, and 22/25 = 0.88). The output of test4() is wrong: 0.0. However, the two should be equivalent.

Does anyone have any idea?


Answer 1:


Are you sure it is not the tf.constant version that fails? I find that tf.metrics behaves strangely in combination with tf.constant:

import tensorflow as tf

a = tf.constant(1.)
mean_a, mean_a_uop = tf.metrics.mean(a)
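# the control dependency should force the update to run before mean_a is read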
with tf.control_dependencies([mean_a_uop]):
  mean_a = tf.identity(mean_a)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()

for _ in range(10):
  print(sess.run(mean_a))

returns, when run on the GPU,

0.0
2.0
1.5
1.3333334
1.25
1.2
1.1666666
1.1428572
1.125
1.1111112

instead of all 1s. It looks as if the count is lagging by one step. (I assume the first value would be inf, but it comes out as zero due to some condition on the count.) A placeholder version of this code, on the other hand, runs as expected.
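
Here is a minimal sketch of that placeholder variant for comparison (the same graph, only the input source changes); in my runs it printed 1.0 on every iteration (but see the EDIT below):

import tensorflow as tf

a = tf.placeholder(tf.float32)            # fed with 1. on every step
mean_a, mean_a_uop = tf.metrics.mean(a)
with tf.control_dependencies([mean_a_uop]):
  mean_a = tf.identity(mean_a)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()

for _ in range(10):
  print(sess.run(mean_a, feed_dict={a: 1.}))  # 1.0 every time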

On the CPU, the behavior is even weirder: the output is non-deterministic. Example output:

0.0
1.0
1.0
0.75
1.0
1.0
0.85714287
0.875
1.0
0.9

This looks like a bug you could file on TensorFlow's GitHub repo. (Note that running streaming metrics on constants is of limited use, but it is still a bug.)
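
In the meantime, a possible workaround is to drop the control-dependency trick and split the update and the read into two separate session calls, so the read cannot race the update. This is only a sketch, assuming the cause is a race between the variable read and the update op inside a single session.run call (my guess, not verified):

import tensorflow as tf

a = tf.constant(1.)
mean_a, mean_a_uop = tf.metrics.mean(a)  # keep the value tensor and the update op separate

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()

for _ in range(10):
  sess.run(mean_a_uop)     # first advance the running total and count
  print(sess.run(mean_a))  # then read total/count in a separate call; should be 1.0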

EDIT: I have now also stumbled on weird examples with tf.placeholder; it seems that tf.metrics has a bug that, unfortunately, is not limited to its use with tf.constant.



Source: https://stackoverflow.com/questions/50392027/tf-constant-and-tf-placeholder-behave-differently
