tensorflow, neural-network, tflearn

How can we get the values of hidden layer nodes in Tensorflow/Tflearn?


Here's code for XOR in TFLearn. I wish to get the values of the penultimate hidden layer's nodes (not the weights). How can I get those? More specifically, I wish to obtain the values of the layer-2 nodes (as given in the code) for each of the four predictions below.

import tensorflow as tf
import tflearn

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]  # inputs
Y_xor = [[0.], [1.], [1.], [0.]]  # labels (XOR truth table)

# Graph definition
with tf.Graph().as_default():
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    net = tflearn.input_data(shape=[None, 2], name='inputLayer')
    net = tflearn.fully_connected(net, 2, activation='sigmoid', weights_init=tnorm, name='layer1')
    # note: softmax over a single output unit always yields 1.0; 'sigmoid' is the usual choice here
    net = tflearn.fully_connected(net, 1, activation='softmax', weights_init=tnorm, name='layer2')
    regressor = tflearn.regression(net, optimizer='sgd', learning_rate=2., loss='mean_square', name='layer3')

    # Training
    m = tflearn.DNN(regressor)
    m.fit(X, Y_xor, n_epoch=100, snapshot_epoch=False) 

    # Testing
    print("Testing XOR operator")
    print("0 xor 0:", m.predict([[0., 0.]]))
    print("0 xor 1:", m.predict([[0., 1.]]))
    print("1 xor 0:", m.predict([[1., 0.]]))
    print("1 xor 1:", m.predict([[1., 1.]]))

    layer1_var = tflearn.variables.get_layer_variables_by_name('layer1')
    layer2_var = tflearn.variables.get_layer_variables_by_name('layer2')
    inputLayer_var = tflearn.variables.get_layer_variables_by_name('inputLayer')  # note: the input layer is just a placeholder, so this list is empty

    #result = tf.matmul(inputLayer_var, layer1_var[0]) + layer1_var[1]

    with m.session.as_default():
        print(tflearn.variables.get_value(layer1_var[0]))   #layer1 weights
        print(tflearn.variables.get_value(layer1_var[1]))   #layer1 bias
        print(tflearn.variables.get_value(layer2_var[0]))   #layer2 weights
        print(tflearn.variables.get_value(layer2_var[1]))   #layer2 bias

Solution

  • You can create a new model that shares the same session as 'm' (so it re-uses the same trained weights). Note that you could also save your 'm' model and load it with a second model; that gives similar results (a save/load sketch is included at the end of the code below).

    import tensorflow as tf
    import tflearn
    
    X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
    Y_xor = [[0.], [1.], [1.], [0.]]
    
    # Graph definition
    with tf.Graph().as_default():
        tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
        net = tflearn.input_data(shape=[None, 2], name='inputLayer')
        layer1 = tflearn.fully_connected(net, 2, activation='sigmoid', weights_init=tnorm, name='layer1')
        layer2 = tflearn.fully_connected(layer1, 1, activation='softmax', weights_init=tnorm, name='layer2')
        regressor = tflearn.regression(layer2, optimizer='sgd', learning_rate=2., loss='mean_square', name='layer3')
    
        # Training
        m = tflearn.DNN(regressor)
        m.fit(X, Y_xor, n_epoch=100, snapshot_epoch=False) 
    
        # Testing
        print("Testing XOR operator")
        print("0 xor 0:", m.predict([[0., 0.]]))
        print("0 xor 1:", m.predict([[0., 1.]]))
        print("1 xor 0:", m.predict([[1., 0.]]))
        print("1 xor 1:", m.predict([[1., 1.]]))
    
        # You can create a new model that shares the same session (to get the same weights).
        # Alternatively, you can simply save and load the model (sketched below).
        m2 = tflearn.DNN(layer1, session=m.session)
        print(m2.predict([[0., 0.]]))  # 'layer1' (hidden) node values for input [0., 0.]
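        # Alternative route (as noted above): save the trained model, then
        # restore its weights into a second DNN built on 'layer1'. This is a
        # minimal sketch; 'm3' and the checkpoint name 'xor.tflearn' are
        # illustrative, not part of the original code.
        m.save('xor.tflearn')
        m3 = tflearn.DNN(layer1)
        m3.load('xor.tflearn')  # restores all saved variables, including layer1's
        print(m3.predict(X))    # hidden-node values for all four inputs at once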