Question
I decided to update TensorBoard because it wasn't showing the graph; on the graph panel all I could see was a blank page with no error message. Now that I have updated, the graph is the only thing my TensorBoard shows, and I cannot see scalars or histograms anymore. I get the:
No scalar data was found.
message, and the same for histograms etc.
These are the relevant parts of my code:
def train_model(self):
    with tf.Session(graph=self.graph) as session:
        session.run(tf.global_variables_initializer())  # Now all variables should be initialized.
        print("Uninitialized variables: ", session.run(tf.report_uninitialized_variables()))  # Just to check, should print nothing
        self.train_writer = tf.summary.FileWriter(self.default_folder + "/logs/train", graph=session.graph)
        self.test_writer = tf.summary.FileWriter(self.default_folder + "/logs/test", graph=session.graph)
        print("Training for ", self.n_steps)
        for step in range(self.n_steps):
            feed_train = self._create_feed_dict(self.X_train, step)
            feed_test = self._create_feed_dict(self.X_test, step)
            session.run(self.optimizer, feed_dict={self.v_clamp: feed_train})
            # Get the loss in tensorboard
            loss_train = session.run(self.summary_loss, feed_dict={self.v_clamp: feed_train})
            self.train_writer.add_summary(loss_train, step)
            self.train_writer.flush()
            loss_test = session.run(self.summary_loss, feed_dict={self.v_clamp: feed_test})
            self.test_writer.add_summary(loss_test, step)
            self.test_writer.flush()
            if (step % 1000) == 0:
                histograms = session.run(self.merged, feed_dict={self.v_clamp: feed_train})
                self.train_writer.add_summary(histograms, step)
                self.train_writer.flush()
                self.saver.save(session, self.default_folder + self.model_name, global_step=step)
                print(step)
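Since both writers flush on every step, the event files on disk can be inspected directly, independently of the TensorBoard UI. The following is only a sketch, assuming the TF 1.x API and the log layout used above (the glob pattern is an assumption about where the train events end up):

import glob
import tensorflow as tf

# The path mirrors default_folder + "/logs/train" from train_model above.
for event_file in glob.glob("./models/GSSU-RBM/logs/train/events.out.tfevents.*"):
    for event in tf.train.summary_iterator(event_file):
        for value in event.summary.value:
            # Summaries written with tf.summary.scalar carry a simple_value.
            if value.HasField("simple_value"):
                print(event.step, value.tag, value.simple_value)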
I created these variables inside my _build_model method.
def _build_model(self):
    ...
    with tf.variable_scope("hidden_layer_1"):
        self.W = tf.get_variable(name="w", dtype=tf.float32, shape=[self.n_visibles, self.n_hidden], initializer=tf.random_uniform_initializer(maxval=0.01, minval=-0.01))
        hist1 = tf.summary.histogram("hidden_layer_1" + '/weights', self.W)
        self.bh = tf.get_variable(name="b_h", dtype=tf.float32, shape=[self.n_hidden], initializer=tf.random_uniform_initializer(maxval=0.01, minval=-0.01))
        hist2 = tf.summary.histogram("hidden_layer_1" + '/biases', self.bh)
        if self.hidden_activation == 'sigma':
            self.h_clamp = tf.nn.sigmoid(tf.matmul(self.v_clamp, self.W) + self.bh)
        elif self.hidden_activation == 'relu':
            self.h_clamp = tf.nn.relu(tf.matmul(self.v_clamp, self.W) + self.bh)
        elif self.hidden_activation == 'tanh':
            self.h_clamp = tf.nn.tanh(tf.matmul(self.v_clamp, self.W) + self.bh)
        hist3 = tf.summary.histogram("hidden_layer_1" + '/h', self.h_clamp)
    ...
    # Get loss in tensorboard
    with tf.variable_scope("compute_loss"):
        self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.v_clamp - v_free)))
        self.summary_loss = tf.summary.scalar("loss", self.loss_function)  # this is what we need to run!
    self.merged = tf.summary.merge([hist1, hist2, hist3])
    # Object to save variable weights and more
    self.saver = tf.train.Saver()
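As an aside, a self-contained sanity check like the sketch below (assuming the TF 1.x API and a hypothetical ./models/GSSU-RBM/logs/sanity directory) can help separate the two possible failure points: it writes one scalar and one histogram summary with no other moving parts, so if even this run does not render in TensorBoard, the problem is in the TensorBoard/TensorFlow installation rather than in _build_model or train_model.

import numpy as np
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[None])
    scalar_summary = tf.summary.scalar("sanity/mean", tf.reduce_mean(x))
    hist_summary = tf.summary.histogram("sanity/values", x)
    merged = tf.summary.merge([scalar_summary, hist_summary])

with tf.Session(graph=graph) as session:
    writer = tf.summary.FileWriter("./models/GSSU-RBM/logs/sanity", graph=session.graph)
    for step in range(10):
        summ = session.run(merged, feed_dict={x: np.random.randn(100).astype(np.float32)})
        writer.add_summary(summ, step)
    writer.flush()
    writer.close()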
My default_folder is "./models/GSSU-RBM/" and my model_name is "myRBM".
I execute tensorboard like this:
tensorboard --logdir "C:\Users\isaac\Desktop\TFM\models\GSSU-RBM"
And I get the following message:
Exception in thread Reloader:
Traceback (most recent call last):
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\threading.py", line 914, in _bootstrap_inner
self.run()
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\backend\application.py", line 361, in _reload_forever
reload_multiplexer(multiplexer, path_to_run)
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\backend\application.py", line 335, in reload_multiplexer
multiplexer.Reload()
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\backend\event_processing\plugin_event_multiplexer.py", line 195, in Reload
accumulator.Reload()
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\backend\event_processing\plugin_event_accumulator.py", line 186, in Reload
self._ProcessEvent(event)
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\backend\event_processing\plugin_event_accumulator.py", line 332, in _ProcessEvent
value = data_compat.migrate_value(value)
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\data_compat.py", line 57, in migrate_value
return handler(value) if handler else value
File "c:\users\isaac\anaconda3\envs\tensorflow-gpu\lib\site-packages\tensorboard\data_compat.py", line 106, in _migrate_scalar_value
tensor_proto = tf.make_tensor_proto(scalar_value)
AttributeError: module 'tensorflow' has no attribute 'make_tensor_proto'
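The AttributeError says that the tensorboard package is calling tf.make_tensor_proto, which the installed TensorFlow build does not expose, i.e. the pip-installed tensorboard is newer than the conda tensorflow-gpu it runs against. A quick, hedged way to confirm the mismatch from the same environment:

import tensorflow as tf
from tensorboard import version as tb_version  # tensorboard ships its version string here

print("tensorflow :", tf.__version__)
print("tensorboard:", tb_version.VERSION)
# The newer tensorboard expects this attribute at the top level of tensorflow.
print("tf.make_tensor_proto available:", hasattr(tf, "make_tensor_proto"))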
BTW, I'm running this with tensorflow-gpu on Windows 10, in a Jupyter notebook on Anaconda. I installed tensorboard with pip install tensorboard and tensorflow-gpu through conda.
EDIT: Also, here is the output from my --inspect run.
Doing: tensorboard --inspect --logdir "C:\Users\isaac\Desktop\TFM\models\GSSU-RBM"
I get:
These tags are in C:\Users\isaac\Desktop\TFM\models\GSSU-RBM\logs\test:
audio -
histograms -
images -
scalars
compute_loss/loss
tensor -
======================================================================
Event statistics for C:\Users\isaac\Desktop\TFM\models\GSSU-RBM\logs\test:
audio -
graph
first_step 0
last_step 0
max_step 0
min_step 0
num_steps 1
outoforder_steps []
histograms -
images -
scalars
first_step 0
last_step 84487
max_step 84487
min_step 0
num_steps 84488
outoforder_steps []
sessionlog:checkpoint -
sessionlog:start -
sessionlog:stop -
tensor -
======================================================================
These tags are in C:\Users\isaac\Desktop\TFM\models\GSSU-RBM\logs\train:
audio -
histograms
hidden_layer_1/hidden_layer_1/biases
hidden_layer_1/hidden_layer_1/h
hidden_layer_1/hidden_layer_1/weights
images -
scalars
compute_loss/loss
tensor -
======================================================================
Event statistics for C:\Users\isaac\Desktop\TFM\models\GSSU-RBM\logs\train:
audio -
graph
first_step 0
last_step 0
max_step 0
min_step 0
num_steps 1
outoforder_steps []
histograms
first_step 0
last_step 84000
max_step 84000
min_step 0
num_steps 85
outoforder_steps []
images -
scalars
first_step 0
last_step 84599
max_step 84599
min_step 0
num_steps 84600
outoforder_steps []
sessionlog:checkpoint -
sessionlog:start -
sessionlog:stop -
tensor -
======================================================================
Answer 1:
Solved! In case someone is in the same situation, the solution was to uninstall the conda TensorFlow and install it through pip. It will then give you an error asking for a specific CUDA and cuDNN version; once you install those two, it should work.
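A hedged check once the pip tensorflow-gpu wheel and the matching CUDA/cuDNN are in place (the exact versions depend on the wheel, so this only verifies that the symptoms from the question are gone):

import tensorflow as tf
from tensorflow.python.client import device_lib

print("tensorflow:", tf.__version__)
# This is the attribute the TensorBoard reloader thread was failing on.
print("tf.make_tensor_proto available:", hasattr(tf, "make_tensor_proto"))
# A GPU entry here confirms the pip tensorflow-gpu build can see the device
# once the matching CUDA/cuDNN libraries are installed.
print([d.name for d in device_lib.list_local_devices()])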
Source: https://stackoverflow.com/questions/48684755/tensorboard-doesnt-show-scalars-anymore