Runtime Error: Disconnected graph for GANs because input can't be obtained

Submitted by 江枫思渺然 on 2019-12-10 11:08:18

Question


Here is my discriminator architecture:

def build_discriminator(img_shape,embedding_shape):

    model1 = Sequential()

    model1.add(Conv2D(32, kernel_size=5, strides=2, input_shape=img_shape, padding="same"))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(48, kernel_size=5, strides=2, padding="same"))
    #model.add(ZeroPadding2D(padding=((0,1),(0,1))))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(64, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(128, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Conv2D(256, kernel_size=5, strides=2, padding="same"))
    model1.add(BatchNormalization(momentum=0.8))
    model1.add(LeakyReLU(alpha=0.2))
    model1.add(Dropout(0.25))
    model1.add(Flatten())
    model1.add(Dense(200))

    model2=Sequential()
    model2.add(Dense(50, input_shape=embedding_shape))
    model2.add(Dense(100))
    model2.add(Dense(200))
    model2.add(Flatten())
    merged_model = Sequential()
    merged_model.add(Merge([model1, model2], mode='concat'))

    merged_model.add(Dense(1, activation='sigmoid', name='output_layer'))
    #merged_model.compile(loss='binary_crossentropy', optimizer='adam', 
    #metrics=['accuracy'])
    #model1.add(Dense(1, activation='sigmoid'))

    merged_model.summary()
    merged_model.input_shape

    img = Input(shape=img_shape)
    emb = Input(shape=embedding_shape)
    validity = merged_model([img,emb])

    return Model([img,emb],validity)

and here is the generator architecture:

def build_generator(latent_dim=484):
    model = Sequential()

    model.add(Dense(624 * 2 * 2, activation="relu", input_dim=latent_dim))
    model.add(Reshape((2, 2, 624)))
    model.add(UpSampling2D())

    model.add(Conv2D(512, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #4x4x512
    model.add(Conv2D(256, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #8x8x256
    model.add(Conv2D(128, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #16x16x128
    model.add(Conv2D(64, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #32x32x64
    model.add(Conv2D(32, kernel_size=5, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
    model.add(UpSampling2D())
    #64x64x32
    model.add(Conv2D(3, kernel_size=5, padding="same"))
    model.add(Activation("tanh"))
    #128x128x3

    noise = Input(shape=(latent_dim,))
    img = model(noise)
    return Model(noise, img)

and here is how I am building the GAN network:

optimizer = Adam(0.0004, 0.5)

discriminator=build_discriminator((128,128,3),(1,128,3))
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

# Build the generator
generator = build_generator()

# The generator takes noise as input and generates imgs
z = Input(shape=(100+384,))
img = generator(z)
# For the combined model we will only train the generator
discriminator.trainable = False
temp=Input(shape=(1,128,3))
# The discriminator takes generated images as input and determines validity
valid = discriminator([img,temp])

# The combined model  (stacked generator and discriminator)
# Trains the generator to fool the discriminator
combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)

The discriminator consists of two models: it takes as input an image of shape 128x128x3 and an embedding of shape 1x128x3, and the two branches are then merged. The generator just takes noise and generates a 128x128x3 image. At the line combined = Model(z, valid) I am getting the following error:

RuntimeError: Graph disconnected: cannot obtain value for tensor Tensor("input_5:0", shape=(?, 1, 128, 3), dtype=float32) at layer "input_5". The following previous layers were accessed without issue: ['input_4', 'model_2']

I think this is because the discriminator can't find the embedding input, but I am feeding it a tensor of shape (1, 128, 3), just as noise is fed to the generator model. Can anyone please point out where I am going wrong?

Once everything is set up, here is how I will generate images from the noise and embedding vector merged together; the discriminator will then take an image and a vector to identify fakes:

#texts has embedding vectors
pics = np.array(pics)   #images
noise = np.random.normal(0, 1, (batch_size, 100))
j=0
latent_code=[]
for j in range(len(texts)):     #appending embedding at the end of noise           
    n=np.append(noise[j],texts[j])
    n=n.tolist()
    latent_code.append(n)
latent_code=np.array(latent_code)
gen_imgs = generator.predict(latent_code)       #gen making fakes  
j=0
vects=[]
for im in gen_imgs:                             
    t=np.array(texts[j])
    t=np.reshape(t,[128,3])
    t=np.expand_dims(t, axis=0)
    vects.append(t)
    j+=1
vects=np.array(vects)   #vector of ?,1,128,3
#disc marking fakes and reals
d_loss_real = discriminator.train_on_batch([pics,vects], valid)
d_loss_fake = discriminator.train_on_batch([gen_imgs,vects], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss = combined.train_on_batch(latent_code, valid)

Answer 1:


You have forgotten to add temp as one of the inputs of the GAN (that's why the error says it cannot obtain a value for the corresponding tensor: it is essentially disconnected from the graph):

combined = Model([z, temp], valid)
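
Note that the combined model now has two inputs, so every call to it must supply both tensors. The last line of your training loop, for example, would become (reusing your latent_code, vects and valid arrays):

g_loss = combined.train_on_batch([latent_code, vects], valid)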

As a side note, I highly recommend using the Keras Functional API for building complicated, multi-branch models like your discriminator. It is much easier to use, more flexible, and less error-prone.

For example, here is the discriminator you wrote, rewritten using the Functional API. I personally think it is much easier to follow:

from keras.layers import (Input, Dense, Conv2D, BatchNormalization,
                          LeakyReLU, Dropout, Flatten, concatenate)
from keras.models import Model

def build_discriminator(img_shape, embedding_shape):

    input_img = Input(shape=img_shape)
    x = Conv2D(32, kernel_size=5, strides=2, padding="same")(input_img)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)
    x = Conv2D(48, kernel_size=5, strides=2, padding="same")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)
    x = Conv2D(64, kernel_size=5, strides=2, padding="same")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)
    x = Conv2D(128, kernel_size=5, strides=2, padding="same")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)
    x = Conv2D(256, kernel_size=5, strides=2, padding="same")(x)
    x = BatchNormalization(momentum=0.8)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    output_img = Dense(200)(x)

    input_emb = Input(shape=embedding_shape)
    y = Dense(50)(input_emb)
    y = Dense(100)(y)
    y = Dense(200)(y)
    output_emb = Flatten()(y)

    merged = concatenate([output_img, output_emb])
    output_merge = Dense(1, activation='sigmoid', name='output_layer')(merged)

    return Model([input_img, input_emb], output_merge)
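
For completeness, the rest of the setup stays the same apart from the combined model. Here is a minimal sketch of the corrected wiring, reusing the shapes and optimizer settings from your own code:

optimizer = Adam(0.0004, 0.5)

discriminator = build_discriminator((128, 128, 3), (1, 128, 3))
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

generator = build_generator()

z = Input(shape=(484,))            # 100-dim noise + 384-dim embedding
img = generator(z)

discriminator.trainable = False    # only the generator is trained through the combined model
temp = Input(shape=(1, 128, 3))    # embedding input for the discriminator
valid = discriminator([img, temp])

# Both graph inputs (z and temp) are declared, so the graph is fully connected
combined = Model([z, temp], valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)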


Source: https://stackoverflow.com/questions/51211443/runtime-error-disconnected-graph-for-gans-because-input-cant-be-obtained
