The text data is organized as a vector with 20,000 elements, e.g. [2, 1, 0, 0, 5, ..., 0]; the i-th element is the frequency of the i-th word in the text (a bag-of-words representation).
The ground truth is a 4,000-element binary vector: the i-th element is 1 if the i-th label applies to the text, and several elements can be 1 at once (a multi-label task).
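For example, a single (input, target) pair could look like this toy sketch (made-up indices, just to illustrate the shapes):

import numpy as np

# bag-of-words input: word 0 occurs twice, word 1 once, word 4 five times
x_sample = np.zeros(20000, dtype=np.float32)
x_sample[[0, 1, 4]] = [2, 1, 5]

# multi-label target: labels 3 and 17 apply to this text
y_sample = np.zeros(4000, dtype=np.float32)
y_sample[[3, 17]] = 1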
Change the relu to a sigmoid in the output layer, and replace the cross-entropy loss with the explicit mathematical formula of the sigmoid cross-entropy loss: cross_entropy = -Σ_i [ y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ], where p_i is the i-th sigmoid output. (The explicit formula was working in my case/version of TensorFlow.)
import tensorflow as tf

# hidden layer: fully connected, relu activation
class HiddenLayer(object):
    def __init__(self, input, n_in, n_out):
        self.input = input
        w_h = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=0.05))
        b_h = tf.Variable(tf.zeros([n_out]))
        self.w = w_h
        self.b = b_h
        self.params = [self.w, self.b]

    def output(self):
        # affine transform followed by relu
        linarg = tf.matmul(self.input, self.w) + self.b
        return tf.nn.relu(linarg)
# output layer: fully connected; with a sigmoid each of the outputs is an
# independent per-label probability
class OutputLayer(object):
    def __init__(self, input, n_in, n_out):
        self.input = input
        w_o = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=0.05))
        b_o = tf.Variable(tf.zeros([n_out]))
        self.w = w_o
        self.b = b_o
        self.params = [self.w, self.b]

    def output(self):
        linarg = tf.matmul(self.input, self.w) + self.b
        # changed relu to sigmoid
        return tf.nn.sigmoid(linarg)
# placeholders for the bag-of-words input and the multi-label targets
# (assumed shapes; these were defined outside the snippet)
x = tf.placeholder("float", [None, 20000])
y_ = tf.placeholder("float", [None, 4000])

# model
def model():
    h_layer = HiddenLayer(input=x, n_in=20000, n_out=1000)
    o_layer = OutputLayer(input=h_layer.output(), n_in=1000, n_out=4000)

    # loss function
    out = o_layer.output()
    # modified cross entropy to the explicit formula of the sigmoid
    # cross-entropy loss; the 1e-9 terms guard against log(0)
    cross_entropy = -tf.reduce_sum(
        (y_ * tf.log(out + 1e-9)) + ((1 - y_) * tf.log(1 - out + 1e-9)),
        name='xentropy')

    # L2 regularization on both weight matrices
    l2 = tf.nn.l2_loss(h_layer.w) + tf.nn.l2_loss(o_layer.w)
    lambda_2 = 0.01

    # compute loss
    loss = cross_entropy + lambda_2 * l2

    # argmax-based accuracy only suits a single-label task; a multi-label
    # variant is sketched below
    correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, "float"))

    return loss, accuracy
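As a side note: if the explicit formula ever gives you NaNs despite the 1e-9 terms, TensorFlow's built-in tf.nn.sigmoid_cross_entropy_with_logits computes the same loss in a numerically stable way from the raw pre-sigmoid logits, and for multi-label targets a thresholded element-wise accuracy is more meaningful than argmax. Roughly like this inside model() (a sketch against the code above, reusing the o_layer and y_ names; depending on your TensorFlow version the arguments may be positional (logits, targets) instead of keyword):

    # numerically stable equivalent of the explicit formula, computed on the
    # raw logits (the pre-sigmoid linear output of the output layer)
    logits = tf.matmul(o_layer.input, o_layer.w) + o_layer.b
    cross_entropy = tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=logits),
        name='xentropy')

    # multi-label accuracy: threshold each sigmoid output at 0.5 and compare
    # element-wise with the binary targets
    predicted = tf.cast(tf.greater(tf.nn.sigmoid(logits), 0.5), "float")
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, y_), "float"))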