Loss and accuracy are 0 when using a neural network with one output neuron
I am writing a binary classifier for a specific task, and instead of using two neurons in the output layer I want to use just one with a sigmoid function, outputting class 0 if the activation is less than 0.5 and class 1 otherwise.
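In other words, the intended decision rule is just a threshold on the sigmoid activation, something like this (a minimal sketch; output is a hypothetical tensor holding the sigmoid activation):

# 1.0 (class 1) if the sigmoid activation exceeds 0.5, otherwise 0.0 (class 0)
predicted_class = tf.cast(tf.greater(output, 0.5), tf.float32)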
Images are loaded, resized to 64x64, and flattened into vectors of length 4096. (The code to load the data is at the end.) I create the placeholders:
x = tf.placeholder('float', [None, 64*64])
y = tf.placeholder('float', [None, 1])
and define the model as follows.
def create_model_linear(data):
    fcl1_desc = {'weights': weight_variable([4096, 128]), 'biases': bias_variable([128])}
    fcl2_desc = {'weights': weight_variable([128, 1]), 'biases': bias_variable([1])}

    fc1 = tf.nn.relu(tf.matmul(data, fcl1_desc['weights']) + fcl1_desc['biases'])
    fc2 = tf.nn.sigmoid(tf.matmul(fc1, fcl2_desc['weights']) + fcl2_desc['biases'])

    return fc2
The functions weight_variable and bias_variable simply return a tf.Variable() of the given shape. (The code for them is also at the end.)
Then I define the training function as follows.
def train(x, hm_epochs):
    prediction = create_model_linear(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    batch_size = 100

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = train_x[start:end]
                batch_y = train_y[start:end]
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.greater(prediction, [0.5])
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        i = 0
        acc = []
        while i < len(train_x):
            acc += [accuracy.eval({x: train_x[i:i+1000], y: train_y[i:i+1000]})]
            i += 1000
        print sum(acc)/len(acc)
The output of train(x, 10) is:
('Epoch', 1, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 2, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 3, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 4, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 5, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 6, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 7, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 8, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 9, 'completed out of', 10, 'loss:', 0.0)
('Epoch', 10, 'completed out of', 10, 'loss:', 0.0)
0.0
What am I missing?
And here is the promised code for all the utility functions:
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def getLabel(wordlabel):
    if wordlabel == 'Class_A':
        return [1]
    elif wordlabel == 'Class_B':
        return [0]
    else:
        return -1

def loadImages(pathToImgs):
    images = []
    labels = []
    filenames = os.listdir(pathToImgs)
    imgCount = 0
    for i in tqdm(filenames):
        wordlabel = i.split('_')[1]
        oneHotLabel = getLabel(wordlabel)
        img = cv2.imread(pathToImgs + i, cv2.IMREAD_GRAYSCALE)
        if oneHotLabel != -1 and type(img) is np.ndarray:
            images += [cv2.resize(img, (64,64)).flatten()]
            labels += [oneHotLabel]
            imgCount += 1
    print imgCount
    return (images, labels)
I think you should use tf.nn.sigmoid_cross_entropy_with_logits instead of tf.nn.softmax_cross_entropy_with_logits, because you are using a sigmoid and a single neuron in the output layer. With only one output, softmax normalizes a one-element vector, so it always returns 1.0, and the cross-entropy -label * log(1.0) is 0 for every example; that is why your loss stays at 0.0.
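You can verify this with a small self-contained snippet (TF 1.x, with made-up logit and label values):

import tensorflow as tf

# Softmax over a single logit z is exp(z)/exp(z) = 1.0, so the
# cross-entropy -label * log(1.0) is 0 for any logit and any label.
logits = tf.constant([[3.7], [-2.1]])  # arbitrary one-neuron outputs
labels = tf.constant([[1.0], [0.0]])
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

with tf.Session() as sess:
    print(sess.run(loss))  # [0. 0.]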
You also need to remove the sigmoid from the last layer in create_model_linear, because sigmoid_cross_entropy_with_logits applies the sigmoid internally and expects raw logits. Finally, your accuracy computation never uses the labels y; it should have the following form:
correct = tf.equal(tf.greater(tf.nn.sigmoid(prediction), [0.5]), tf.cast(y, 'bool'))
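Putting it together, the cost and accuracy portion of train would look roughly like this (a sketch under the assumptions above, reusing the question's variable names):

prediction = create_model_linear(x)  # must now return raw logits (no sigmoid in the last layer)

# sigmoid_cross_entropy_with_logits applies the sigmoid internally,
# so it is fed the raw logits rather than sigmoid outputs.
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)

# Accuracy: apply the sigmoid explicitly, threshold at 0.5,
# and compare against the true labels y.
correct = tf.equal(tf.greater(tf.nn.sigmoid(prediction), [0.5]), tf.cast(y, 'bool'))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))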