Patch based image training and combine their probability from an image

Submitted by 被刻印的时光 ゝ on 2021-02-11 13:00:15

Question


Firstly, I have implemented a simple VGG16 network for image classification.

model = keras.applications.vgg16.VGG16(include_top=False,
                                       weights=None,
                                       input_shape=(32, 32, 3),
                                       pooling='max',
                                       classes=10)

Its input shape is 32 x 32. Now I am trying to implement a patch-based neural network. The main idea is to extract 4 patches from the input image, as in this image,

and to train on the extracted patches (each resized to 32 x 32, since that is the input shape of the model), and finally to combine the four output probabilities into the final result (using normalization and argmax), like this:

How can I do that?

Thanks in advance for your help.

Note:

I am guessing it could be done with a Lambda layer.
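
For illustration, here is a minimal sketch of what I have in mind, assuming a hypothetical 64 x 64 input image, four non-overlapping quadrant patches, and a Dense softmax head added on top of the VGG16 base (include_top=False drops the built-in classifier, so the classes argument above is ignored); the four softmax outputs are averaged, and the argmax of the average gives the final class:

import tensorflow as tf
from tensorflow import keras

# Shared backbone: the VGG16 base plus a hypothetical 10-way softmax head.
base = keras.applications.vgg16.VGG16(include_top=False,
                                      weights=None,
                                      input_shape=(32, 32, 3),
                                      pooling='max')
patch_classifier = keras.Sequential([base,
                                     keras.layers.Dense(10, activation='softmax')])

inputs = keras.Input(shape=(64, 64, 3))  # assumed full-image size

# Lambda layer: crop the four quadrants and resize each one to the
# 32 x 32 input shape of the backbone (a no-op here, kept for generality).
def extract_quadrants(x):
    quads = [x[:, :32, :32, :], x[:, :32, 32:, :],
             x[:, 32:, :32, :], x[:, 32:, 32:, :]]
    return [tf.image.resize(q, (32, 32)) for q in quads]

patches = keras.layers.Lambda(extract_quadrants)(inputs)

# Classify every patch with the same shared-weight model and average the probabilities.
probs = [patch_classifier(p) for p in patches]
outputs = keras.layers.Average()(probs)  # argmax of this average is the final prediction

model = keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])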

My simple VGG classification implementation is here in Colab.


Answer 1:


I used the MNIST dataset and split every image into 4 patches with tf.image.extract_patches; the four patches of each image are then passed through the model together as one batch:

import tensorflow as tf
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from tensorflow import nn as nn
from functools import partial
import matplotlib.pyplot as plt

(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.mnist.load_data()

train = tf.data.Dataset.from_tensor_slices((xtrain, ytrain))
test = tf.data.Dataset.from_tensor_slices((xtest, ytest))

patch_s = 18
stride = xtrain.shape[1] - patch_s  # 28 - 18 = 10, giving a 2 x 2 grid of patches

# Split each 28 x 28 image into four overlapping 18 x 18 patches,
# returned as a single (4, 18, 18, 1) tensor (the patches become the batch).
get_patches = lambda x, y: (tf.reshape(
    tf.image.extract_patches(
        images=tf.expand_dims(x[..., None], 0),
        sizes=[1, patch_s, patch_s, 1],
        strides=[1, stride, stride, 1],
        rates=[1, 1, 1, 1],
        padding='VALID'), (4, patch_s, patch_s, 1)), y)

train = train.map(get_patches)
test = test.map(get_patches)

fig = plt.figure()
plt.subplots_adjust(wspace=.1, hspace=.2)
images, labels = next(iter(train))
for index, image in enumerate(images):
    ax = plt.subplot(2, 2, index + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(image)
plt.show()

Then, in the training loop, I'm getting the loss for every one of these 4 outputs:

def compute_loss(model, x, y, training):
    out = model(inputs=x, training=training)                         # (4, 10): one softmax vector per patch
    repeated_y = tf.repeat(tf.expand_dims(y, 0), repeats=4, axis=0)  # same label for all four patches
    # The model already applies softmax, so from_logits must not be set here.
    loss = loss_object(y_true=repeated_y, y_pred=out)
    loss = tf.reduce_mean(loss, axis=0)                              # average the four per-patch losses
    return loss

Then I reduce the mean over axis 0, which averages the four per-patch losses into a single value (the four probability vectors can be merged the same way at inference time). Here's the full running code:

import tensorflow as tf
from tensorflow import keras as K
from tensorflow.keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from tensorflow import nn as nn
from functools import partial
import matplotlib.pyplot as plt

(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.mnist.load_data()

train = tf.data.Dataset.from_tensor_slices((xtrain, ytrain))
test = tf.data.Dataset.from_tensor_slices((xtest, ytest))

patch_s = 18
stride = xtrain.shape[1] - patch_s

get_patches = lambda x, y: (tf.reshape(
    tf.image.extract_patches(
        images=tf.expand_dims(x[..., None], 0),
        sizes=[1, patch_s, patch_s, 1],
        strides=[1, stride, stride, 1],
        rates=[1, 1, 1, 1],
        padding='VALID'), (4, patch_s, patch_s, 1)), y)

train = train.map(get_patches)
test = test.map(get_patches)

fig = plt.figure()
plt.subplots_adjust(wspace=.1, hspace=.2)
images, labels = next(iter(train))
for index, image in enumerate(images):
    ax = plt.subplot(2, 2, index + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.imshow(image)
plt.show()

def prepare(inputs, targets):
    inputs = tf.divide(x=inputs, y=255)
    targets = tf.one_hot(indices=targets, depth=10)
    return inputs, targets

train = train.take(10_000).map(prepare)
test = test.take(1_000).map(prepare)

class MyCNN(K.Model):
    def __init__(self):
        super(MyCNN, self).__init__()
        Conv = partial(Conv2D, kernel_size=(3, 3), activation=nn.relu)
        MaxPool = partial(MaxPooling2D, pool_size=(2, 2))

        self.conv1 = Conv(filters=16)
        self.maxp1 = MaxPool()
        self.conv2 = Conv(filters=32)
        self.maxp2 = MaxPool()
        self.conv3 = Conv(filters=64)
        self.maxp3 = MaxPool()
        self.flatt = Flatten()
        self.dens1 = Dense(64, activation=nn.relu)
        self.drop1 = Dropout(.5)
        self.dens2 = Dense(10, activation=nn.softmax)

    def call(self, inputs, training=None, **kwargs):
        x = self.conv1(inputs)
        x = self.maxp1(x)
        x = self.conv2(x)
        x = self.maxp2(x)
        x = self.conv3(x)
        x = self.maxp3(x)
        x = self.flatt(x)
        x = self.dens1(x)
        x = self.drop1(x)
        x = self.dens2(x)
        return x

model = MyCNN()

loss_object = tf.losses.categorical_crossentropy

def compute_loss(model, x, y, training):
    out = model(inputs=x, training=training)                         # (4, 10): one softmax vector per patch
    repeated_y = tf.repeat(tf.expand_dims(y, 0), repeats=4, axis=0)  # same label for all four patches
    # The model already applies softmax, so from_logits must not be set here.
    loss = loss_object(y_true=repeated_y, y_pred=out)
    loss = tf.reduce_mean(loss, axis=0)                              # average the four per-patch losses
    return loss

def get_grad(model, x, y, training):
    with tf.GradientTape() as tape:
        loss = compute_loss(model, x, y, training=training)
    return loss, tape.gradient(loss, model.trainable_variables)

optimizer = tf.optimizers.Adam()

verbose = "Epoch {:2d}" \
          " Loss: {:.3f} Acc: {:.3%} TLoss: {:.3f} TAcc: {:.3%}"

for epoch in range(1, 10 + 1):
    train_loss = tf.metrics.Mean()
    train_acc = tf.metrics.CategoricalAccuracy()
    test_loss = tf.metrics.Mean()
    test_acc = tf.metrics.CategoricalAccuracy()

    for x, y in train:
        loss_value, grads = get_grad(model, x, y, training=True)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss.update_state(loss_value)
        train_acc.update_state(y, model(x, training=True))

    for x, y in test:
        loss_value, _ = get_grad(model, x, y, training=False)
        test_loss.update_state(loss_value)
        test_acc.update_state(y, model(x, training=False))

    print(verbose.format(epoch,
                         train_loss.result(),
                         train_acc.result(),
                         test_loss.result(),
                         test_acc.result()))

Spoiler alert: with such small patches it doesn't do well. Make the patches larger than 18 x 18 (relative to the 28 x 28 images) for better performance.
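
To get the final prediction for a single image the way the question describes, the four patch probabilities can be averaged and the argmax taken; here is a minimal sketch reusing the model and test dataset defined above:

# Each element of `test` is one image given as a batch of its four patches: x has shape (4, 18, 18, 1).
for x, y in test.take(1):
    probs = model(x, training=False)           # (4, 10): one softmax vector per patch
    avg_probs = tf.reduce_mean(probs, axis=0)  # average the four probability vectors
    print("predicted:", tf.argmax(avg_probs).numpy(),
          "true:", tf.argmax(y).numpy())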



Source: https://stackoverflow.com/questions/65329994/patch-based-image-training-and-combine-their-probability-from-an-image
