TensorFlow: strange behavior in dynamic_rnn: printing the input changes the training output

Notice the strange behavior in tf.nn.dynamic_rnn: given the script below, with a fixed random seed (the tf.set_random_seed call), simply adding "print training_data * 2" (or multiplying by any other number) changes the training result. Does anyone have an idea why this happens? Try it yourself by adding / removing this line.

Note that, with the seed fixed, the training output is exactly the same on every run when that print line is absent; when it is present, the run is still reproducible but converges to a different cost, no matter which number the input is multiplied by. A stripped-down sketch of the same effect follows the full script below.

import tensorflow as tf
import numpy

tf.set_random_seed(0x1111)  # fixed graph-level seed

def generate_data(dataset_len=10, vec_len=5):
    assert vec_len < dataset_len
    def generate_datapoint(period, vec_length, datapoint_len):
        # one-hot vectors cycling through the first `period` positions
        indexes = numpy.array([i % period for i in range(datapoint_len)])
        vectors = numpy.zeros((datapoint_len, vec_length))
        vectors[numpy.arange(datapoint_len), indexes] = 1
        return vectors
    return [generate_datapoint(n % vec_len + 1, vec_len, n) for n in range(10, 10 + dataset_len)]

dataset = generate_data(20, 7)

# input and output are shifted by one
datapoints = [datapoint[:-1].astype('float32') for datapoint in dataset]
output_datapoints = [datapoint[1:].astype('float32') for datapoint in dataset]

print "test and training data loaded"

training_data = tf.placeholder(tf.float32, [1, None, datapoints[0].shape[1]]) # Number of examples, sequence_len, vector_len
expected_output = tf.placeholder(tf.float32, [1, None, output_datapoints[0].shape[1]])

lstm_cell = tf.contrib.rnn.BasicLSTMCell(50)
multi_lstm_cell = tf.contrib.rnn.MultiRNNCell(cells=[lstm_cell] * 2, state_is_tuple=True)

print training_data * 2  # the line in question: adding or removing it changes the training result

lstm_output, state = tf.nn.dynamic_rnn(multi_lstm_cell, training_data, dtype=tf.float32)
output = tf.layers.dense(lstm_output, output_datapoints[0].shape[1], use_bias=False)

cost = tf.reduce_mean((output - expected_output)**2)
optimizer = tf.train.AdamOptimizer().minimize(cost)
init_op = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init_op)

for epoch in range(100):
    epoch_costs = []
    for inp, out in zip(datapoints, output_datapoints):
        o, c, _opt = sess.run([output, cost, optimizer],
                        feed_dict={training_data: [inp],
                                   expected_output: [out]})
        epoch_costs.append(c)
    print "Epoch ", str(epoch), numpy.mean(epoch_costs)
sess.close()
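
For what it's worth, here is a stripped-down sketch of the same setup (my own reduction against the same tf.contrib API, not part of the script above; the EXTRA_OP flag is just for illustration). If merely creating the extra op is what matters, I would expect the printed initial weights to differ between the two settings of the flag, even though the extra tensor is never evaluated:

import tensorflow as tf

EXTRA_OP = True  # flip between True and False and re-run the script

tf.set_random_seed(0x1111)

x = tf.placeholder(tf.float32, [1, None, 7])
if EXTRA_OP:
    _ = x * 2  # adds one multiply op to the graph; it is never run or printed

cells = [tf.contrib.rnn.BasicLSTMCell(50) for _ in range(2)]
multi_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
lstm_output, state = tf.nn.dynamic_rnn(multi_cell, x, dtype=tf.float32)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
# inspect a few of the freshly initialized LSTM weights
print sess.run(tf.trainable_variables()[0])[0, :5]
sess.close()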

      
