Question
My X_test images are 128x128x3 and my Y_test images are 512x512x3. After each epoch, I want to show how the input (X_test) looked, how the expected output (Y_test) looked, and also how the actual output looked. So far, I've only figured out how to add the first two to TensorBoard. Here is the code that calls the callback:
model.fit(X_train,
          Y_train,
          epochs=epochs,
          verbose=2,
          shuffle=False,
          validation_data=(X_test, Y_test),
          batch_size=batch_size,
          callbacks=get_callbacks())
Here is the Callback's code:
import tensorflow as tf
from keras.callbacks import Callback
from keras.callbacks import TensorBoard
import io
from PIL import Image
from constants import batch_size


def get_callbacks():
    tbCallBack = TensorBoard(log_dir='./logs',
                             histogram_freq=1,
                             write_graph=True,
                             write_images=True,
                             write_grads=True,
                             batch_size=batch_size)
    tbi_callback = TensorBoardImage('Image test')
    return [tbCallBack, tbi_callback]
def make_image(tensor):
    """
    Convert a numpy representation of an image to an Image protobuf.
    Copied from https://github.com/lanpa/tensorboard-pytorch/
    """
    height, width, channel = tensor.shape
    print(tensor)
    image = Image.fromarray(tensor.astype('uint8'))  # TODO: maybe float ?
    output = io.BytesIO()
    image.save(output, format='JPEG')
    image_string = output.getvalue()
    output.close()
    return tf.Summary.Image(height=height,
                            width=width,
                            colorspace=channel,
                            encoded_image_string=image_string)
class TensorBoardImage(Callback):
    def __init__(self, tag):
        super().__init__()
        self.tag = tag

    def on_epoch_end(self, epoch, logs={}):
        # Load images
        img_input = self.validation_data[0][0]  # first X_test sample
        img_valid = self.validation_data[1][0]  # first Y_test sample
        print(self.validation_data[0].shape)  # (8, 128, 128, 3)
        print(self.validation_data[1].shape)  # (8, 512, 512, 3)

        image = make_image(img_input)
        summary = tf.Summary(value=[tf.Summary.Value(tag=self.tag, image=image)])
        writer = tf.summary.FileWriter('./logs')
        writer.add_summary(summary, epoch)
        writer.close()

        image = make_image(img_valid)
        summary = tf.Summary(value=[tf.Summary.Value(tag=self.tag, image=image)])
        writer = tf.summary.FileWriter('./logs')
        writer.add_summary(summary, epoch)
        writer.close()
        return
I'm wondering where/how I can get the actual output of the network.
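A minimal sketch of what I assume the prediction step could look like inside on_epoch_end, using self.model (which Keras sets on every callback); this is untested, and the '/pred' tag is only illustrative:

        img_pred = self.model.predict(self.validation_data[0][:1])[0]  # predicted 512x512x3 image, float
        # note: the float-to-uint8 conversion issue described below applies here as well
        image = make_image(img_pred)
        summary = tf.Summary(value=[tf.Summary.Value(tag=self.tag + '/pred', image=image)])
        writer = tf.summary.FileWriter('./logs')
        writer.add_summary(summary, epoch)
        writer.close()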
Another issue: here is a sample of one of the images being passed into TensorBoard:
[[[0.10909907 0.09341043 0.08224604]
[0.11599099 0.09922747 0.09138277]
[0.15596421 0.13087936 0.11472746]
...
[0.87589591 0.72773653 0.69428956]
[0.87006552 0.7218123 0.68836991]
[0.87054225 0.72794635 0.6967475 ]]
...
[[0.26142332 0.16216267 0.10314116]
[0.31526875 0.18743924 0.12351286]
[0.5499796 0.35461449 0.24772873]
...
[0.80937942 0.62956016 0.53784871]
[0.80906054 0.62843601 0.5368183 ]
[0.81046278 0.62453899 0.53849678]]]
Is that the reason why my image = Image.fromarray(tensor.astype('uint8'))
line might be generating images that do not look at all like the actual output? Here is a sample from TensorBoard:
I did try .astype('float64'), but it raised an error because that type is apparently not supported.
Anyhow, I'm unsure this really is the problem, since the rest of my displayed images in TensorBoard are all just white/gray/black squares (this one right there, conv2D_7, is actually the very last layer of my network and should thus display the actual images that are output, no?):
Ultimately, I would like something like this, which I'm already displaying after training through matplotlib:
Finally, I would like to address the fact that this callback takes a long time to process. Is there a more efficient way to do this? It almost doubles my training time (probably because it needs to convert the numpy arrays into images before saving them to the TensorBoard log file).
Answer 1:
The code below takes the model's input, the model's output, and the ground truth, and saves them to TensorBoard. The model is a segmentation model, so three images are saved per sample.
The code is fairly simple and straightforward, but a few explanations:
make_image_tensor - Converts a numpy image into a TensorBoard image summary (protobuf).
TensorboardWriter - Not strictly required, but it's good to keep TensorBoard functionality separate from other modules; it allows reusability.
ModelDiagonoser - The callback that takes a generator and runs predictions with self.model (set by Keras on every callback). It passes the input, the prediction, and the ground truth to TensorBoard to save as images.
import os
import io

import numpy as np
import tensorflow as tf
from PIL import Image
from keras.callbacks import Callback
# Depending on your keras version:-
from keras.engine.training import GeneratorEnqueuer, Sequence, OrderedEnqueuer
# from keras.utils import GeneratorEnqueuer, Sequence, OrderedEnqueuer


def make_image_tensor(tensor):
    """
    Convert a numpy representation of an image to an Image protobuf.
    Adapted from https://github.com/lanpa/tensorboard-pytorch/
    """
    if len(tensor.shape) == 3:
        height, width, channel = tensor.shape
    else:
        height, width = tensor.shape
        channel = 1
    tensor = tensor.astype(np.uint8)
    image = Image.fromarray(tensor)
    output = io.BytesIO()
    image.save(output, format='PNG')
    image_string = output.getvalue()
    output.close()
    return tf.Summary.Image(height=height,
                            width=width,
                            colorspace=channel,
                            encoded_image_string=image_string)
class TensorboardWriter:

    def __init__(self, outdir):
        assert os.path.isdir(outdir)
        self.outdir = outdir
        self.writer = tf.summary.FileWriter(self.outdir,
                                            flush_secs=10)

    def save_image(self, tag, image, global_step=None):
        image_tensor = make_image_tensor(image)
        self.writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag=tag, image=image_tensor)]),
                                global_step)

    def close(self):
        """
        To be called at the end.
        """
        self.writer.close()
class ModelDiagonoser(Callback):

    def __init__(self,
                 data_generator,
                 batch_size,
                 num_samples,
                 output_dir,
                 normalization_mean):
        super().__init__()
        self.data_generator = data_generator
        self.batch_size = batch_size
        self.num_samples = num_samples
        self.tensorboard_writer = TensorboardWriter(output_dir)
        self.normalization_mean = normalization_mean
        is_sequence = isinstance(self.data_generator, Sequence)
        if is_sequence:
            self.enqueuer = OrderedEnqueuer(self.data_generator,
                                            use_multiprocessing=True,
                                            shuffle=False)
        else:
            self.enqueuer = GeneratorEnqueuer(self.data_generator,
                                              use_multiprocessing=True,
                                              wait_time=0.01)
        self.enqueuer.start(workers=4, max_queue_size=4)

    def on_epoch_end(self, epoch, logs=None):
        output_generator = self.enqueuer.get()
        steps_done = 0
        total_steps = int(np.ceil(np.divide(self.num_samples, self.batch_size)))
        sample_index = 0
        while steps_done < total_steps:
            generator_output = next(output_generator)
            x, y = generator_output[:2]
            y_pred = self.model.predict(x)
            y_pred = np.argmax(y_pred, axis=-1)
            y_true = np.argmax(y, axis=-1)

            for i in range(0, len(y_pred)):
                n = steps_done * self.batch_size + i
                if n >= self.num_samples:
                    return
                img = np.squeeze(x[i, :, :, :])
                img = 255. * (img + self.normalization_mean)  # mean is the training images' normalization mean
                img = img[:, :, [2, 1, 0]]  # reordering of channels
                pred = y_pred[i]
                pred = pred.reshape(img.shape[0:2])
                ground_truth = y_true[i]
                ground_truth = ground_truth.reshape(img.shape[0:2])

                self.tensorboard_writer.save_image("Epoch-{}/{}/x".format(epoch, sample_index), img)
                self.tensorboard_writer.save_image("Epoch-{}/{}/y".format(epoch, sample_index), ground_truth)
                self.tensorboard_writer.save_image("Epoch-{}/{}/y_pred".format(epoch, sample_index), pred)
                sample_index += 1

            steps_done += 1

    def on_train_end(self, logs=None):
        self.enqueuer.stop()
        self.tensorboard_writer.close()
Answer 2:
img_input and img_valid are probably in the range 0 to 1. Scaling them by 255 and converting them to uint8 should solve the problem.
img_input = self.validation_data[0][0]  # first X_test sample
# img_input = img_input / np.max(img_input)  # if img_input is not in (0, 1), rescale it
img_input = (255 * img_input).astype(np.uint8)

img_valid = self.validation_data[1][0]  # first Y_test sample
img_valid = (255 * img_valid).astype(np.uint8)
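The same rescaling presumably applies to the network's own prediction before it goes through make_image, assuming the model also outputs values in (0, 1):

img_pred = self.model.predict(self.validation_data[0][:1])[0]  # hypothetical predicted 512x512x3 image
img_pred = (255 * np.clip(img_pred, 0, 1)).astype(np.uint8)    # clip guards against slight overshoot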
Source: https://stackoverflow.com/questions/52469866/displaying-images-on-tensorboard-through-keras