How to wrap tensorflow graph with placeholder in keras

[亡魂溺海] Submitted on 2021-01-28 11:09:41

Question


I have a TensorFlow graph (stored in a protobuf file) with placeholder operations as inputs. I want to wrap this graph as a Keras layer or model.

Here is an example:

with tf.Graph().as_default() as gf:
    x = tf.placeholder(tf.float32, shape=(None, 123), name='x')
    c = tf.constant(100, dtype=tf.float32, name='C')
    y = tf.multiply(x, c, name='y')
    with tf.gfile.GFile("test_graph/y.pb", "wb") as f:
        raw = gf.as_graph_def().SerializeToString()
        f.write(raw)

Load it back as a TensorFlow graph:

persisted_sess = tf.Session()
with persisted_sess.as_default():
    with tf.gfile.GFile("./test_graph/y.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        for i, op in enumerate(persisted_sess.graph.get_operations()):
            tensor = persisted_sess.graph.get_tensor_by_name(op.name + ':0')
            print(i, '\t', op.name, op.type, tensor)
        x_tensor = persisted_sess.graph.get_tensor_by_name('x:0')
        y_tensor = persisted_sess.graph.get_tensor_by_name('y:0')

We can see the x and y operations and tensors:

0    x Placeholder Tensor("x:0", shape=(?, 123), dtype=float32)
1    C Const Tensor("C:0", shape=(), dtype=float32)
2    y Mul Tensor("y:0", shape=(?, 123), dtype=float32)

Then I try to wrap it into a Keras model using different methods:

Method 1:

output_layer = Lambda(lambda x: y_tensor, name='output_y')(x_tensor)
model = Model(inputs=[x_tensor], outputs=[output_layer])  # ERROR!

This already produces the error InvalidArgumentError: You must feed a value for placeholder tensor 'x' with dtype float and shape [?,123] [[{{node x}}]]

Method 2:

input_x = Input(name='x', shape=(123,), dtype='float32')
output_layer = Lambda(lambda x: y_tensor, name='output_y')(input_x)
model = Model(inputs=[input_x], outputs=[output_layer]) # OK

model.predict({'x': np.ones((3, 123), dtype=np.float32)}) # ERROR!

This causes the same error at the predict call.

The closest info I can find relating to my question is this, but it doesn't address the handling of placeholders. What would be the correct way to do this?


Answer 1:


I figured out the way. We need to use InputLayer instead of Input.
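
The reason is that Input() always builds a brand-new placeholder behind the scenes, so it can never point at an op that already exists in an imported graph, while InputLayer accepts an existing tensor through its input_tensor argument. A minimal sketch of the difference (the names existing_x, fresh_x and wrapped_x are only for illustration):

import tensorflow as tf
from tensorflow.keras.layers import Input, InputLayer

existing_x = tf.placeholder(tf.float32, shape=(None, 123), name='existing_x')
fresh_x = Input(shape=(123,), name='fresh_x')                      # creates a new, unrelated placeholder
wrapped_x = InputLayer(input_tensor=existing_x, name='wrapped_x')  # wraps the already-existing tensor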

First, the code that creates the demo TensorFlow graph PB:

def dump_model():  # a function just to keep the demo graph's variables out of the global scope
    import numpy as np
    import sys
    import tensorflow as tf
    with tf.Graph().as_default() as gf:
        x = tf.placeholder(tf.float32, shape=(None, 123), name='x')
        b = tf.placeholder(tf.float32, shape=(None, 123), name='b')
        c = tf.constant(100, dtype=tf.float32, name='C')
        y = tf.multiply(x, c, name='y')
        z = tf.add(y, x, name='z')
        print(x, b, c, y, z)
        with tf.gfile.GFile("test_graph/y.pb", "wb") as f:
            raw = gf.as_graph_def().SerializeToString()
            print(type(raw), len(raw))
            f.write(raw)
dump_model()

Then import the graph and find the input/output tensors:

import numpy as np
import sys
import tensorflow as tf

persisted_sess = tf.Session()
with persisted_sess.as_default():
    with tf.gfile.FastGFile("./test_graph/y.pb",'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
        print(persisted_sess.graph.get_name_scope())
        for i, op in enumerate(persisted_sess.graph.get_operations()):
            tensor = persisted_sess.graph.get_tensor_by_name(op.name + ':0')
            print(i, '\t', op.name, op.type, tensor)
        x_tensor = persisted_sess.graph.get_tensor_by_name('x:0')
        b_tensor = persisted_sess.graph.get_tensor_by_name('b:0')
        y_tensor = persisted_sess.graph.get_tensor_by_name('y:0')
        z_tensor = persisted_sess.graph.get_tensor_by_name('z:0')

Then we can create the Keras model and run inference:

from tensorflow.keras.layers import Lambda, InputLayer
from tensorflow.keras import Model
from tensorflow.python.util import nest
from tensorflow.python.keras.utils import layer_utils

input_x = InputLayer(name='x', input_tensor=x_tensor)
input_x.is_placeholder = True  # this is the critical bit: it makes Keras feed this tensor like a regular input
input_b = InputLayer(name='b2', input_tensor=b_tensor)  # note the Keras name can differ from the TF name
input_b.is_placeholder = True
output_y = Lambda(lambda x: y_tensor, name='output_y')(input_x.output)  # the lambda ignores its argument and returns the existing graph tensor
output_z = Lambda(lambda x_b: z_tensor, name='output_z')([input_x.output, input_b.output])

base_model_inputs = nest.flatten([layer_utils.get_source_inputs(input_x.output),
                                  layer_utils.get_source_inputs(input_b.output)])
base_model = Model(base_model_inputs, [output_y, output_z])
y_out, z_out = base_model.predict({'x': np.ones((3, 123), dtype=np.float32),
                                   'b2': np.full((3, 123), 100.0, dtype=np.float32)})
y_out.shape, z_out.shape
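
As a quick sanity check, the outputs should match what the demo graph computes (y = x * 100 and z = y + x), so feeding all-ones for x gives constant arrays:

assert y_out.shape == (3, 123) and z_out.shape == (3, 123)
assert np.allclose(y_out, 100.0)  # y = 1 * 100
assert np.allclose(z_out, 101.0)  # z = y + x = 100 + 1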

And we can even create a new model from the base model:

from tensorflow.keras.layers import Add
derived_output = Add(name='derived')([output_y, output_z])
derived_model = Model(base_model.inputs, [derived_output])

derived_out = derived_model.predict({'x': np.ones((3, 123), dtype=np.float32),
                                     'b2': np.full((3, 123), 100.0, dtype=np.float32)})
derived_out.shape
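
Since the derived output is just the element-wise sum of the two base outputs, the same feed should give a constant 201:

assert derived_out.shape == (3, 123)
assert np.allclose(derived_out, 201.0)  # y + z = 100 + 101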


Source: https://stackoverflow.com/questions/59707289/how-to-wrap-tensorflow-graph-with-placeholder-in-keras
