CS256
Chris Pollett
Oct 25, 2017
# Feed-dict demo: a placeholder is a graph input whose concrete value is
# supplied at run time through feed_dict.
import tensorflow as tf

# NOTE(review): the original first created an unshaped placeholder
# (tf.placeholder(tf.float32)) and immediately shadowed it with the shaped
# one below; that dead statement is removed here.
x = tf.placeholder(tf.float32, shape=(3, 3))
session = tf.Session()
session.run(x, {x: [[1, 2, 3], [4, 5, 6], [7, 8, 9]]})
# outputs
# array([[ 1., 2., 3.],
#        [ 4., 5., 6.],
#        [ 7., 8., 9.]], dtype=float32)
# A tf.constant is a fixed node in the graph; running it simply returns
# its stored value.
x = tf.constant([[1, 2], [3, 4], [5, 6]], tf.float32, shape=(3, 2))
session = tf.Session()
session.run(x)
# outputs
# array([[ 1., 2.],
#        [ 3., 4.],
#        [ 5., 6.]], dtype=float32)
# A tf.Variable holds mutable state; it must be explicitly initialized
# before a session can read it.
import tensorflow as tf

W = tf.Variable([[0, 1, 0], [1, -1, 1]], dtype=tf.float32)
session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
session.run(W)
# outputs
# array([[ 0., 1., 0.],
#        [ 1., -1., 1.]], dtype=float32)
# tf.get_variable creates (or reuses) a named variable, here filled with
# zeros by its initializer.
W = tf.get_variable("W", shape=[2, 3], initializer=tf.zeros_initializer)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
session.run(W)
# outputs
# array([[ 0., 0., 0.],
#        [ 0., 0., 0.]], dtype=float32)
import tensorflow as tf


def perceptron(weights, inputs, biases, activation):
    """Build one fully-connected layer: activation(weights @ inputs + biases).

    The two prints emit the symbolic shape tensors of weights and inputs;
    note that tf.shape returns a Tensor, so the printed text is the op
    itself rather than concrete numbers.
    """
    print(tf.shape(weights))
    print(tf.shape(inputs))
    pre_activation = tf.matmul(weights, inputs) + biases
    return activation(pre_activation)


def step(nodes):
    """Elementwise step function: 0 for values <= 0, otherwise 1."""
    return tf.ceil(tf.clip_by_value(nodes, 0, 1))


# Wire up a single 2-unit layer fed by a (2, 1) placeholder, with weights
# and biases drawn uniformly from [0, 1).
x = tf.placeholder(tf.float32, shape=(2, 1))
uniform_init = tf.random_uniform_initializer(0, 1)
W1 = tf.get_variable("W1", shape=(2, 2), initializer=uniform_init)
b1 = tf.get_variable("b1", shape=(2, 1), initializer=uniform_init)
my_layer1 = perceptron(W1, x, b1, step)

session = tf.Session()
init = tf.global_variables_initializer()
session.run(init)
session.run(my_layer1, {x: [[1], [1]]})
# outputs
# array([[ 1.],
#        [ 1.]], dtype=float32)
# Stack a second perceptron layer on top of the first, then normalize its
# output with softmax.
W2 = tf.get_variable("W2", shape=(2, 2), initializer=uniform_init)
b2 = tf.get_variable("b2", shape=(2, 1), initializer=uniform_init)
my_layer2 = perceptron(W2, my_layer1, b2, step)
my_layer3 = tf.nn.softmax(my_layer2)
# tf.layers.dense builds a fully-connected layer (weights + biases) for us.
tf.layers.dense(my_layer2, 4, step)
# or we could add a dense layer to the inputs
x = tf.placeholder(tf.float32, shape=(2, 1))
tf.layers.dense(x, 4, step)
# outputs
# <tf.Tensor 'dense/Ceil:0' shape=(2, 4) dtype=float32>
init = tf.global_variables_initializer()
session = tf.Session()
# Run the initializer op so every variable gets its starting value.
session.run(init)
# Compute the value of the Tensor object W in the current environment.
session.run(W)
# Log where each op is placed (CPU/GPU) when the session runs.
# NOTE(review): in the original, the explanatory sentence was fused
# directly onto the code line (a syntax error from slide extraction);
# it is moved into the comment below.
session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# If this says "Device mapping: no known devices", then for sure we are
# not using the GPU.
devices = session.list_devices()
epsilon = 0.01  # the learning rate
optimizer = tf.train.GradientDescentOptimizer(epsilon)

# Squared-error loss. NOTE(review): linear_model and y appear to come
# from an earlier slide not visible here — confirm their definitions.
loss = tf.reduce_sum(tf.square(linear_model - y))
train = optimizer.minimize(loss)

x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
# Repeatedly run the train op, feeding the fixed training set each step.
for i in range(1000):
    session.run(train, {x: x_train, y: y_train})
# Persist the trained variables to a checkpoint on disk ...
saver = tf.train.Saver()
saver.save(session, "my_trained_model.ckpt")

# ... and later restore them into a fresh session.
session = tf.Session()
saver = tf.train.Saver()
saver.restore(session, "my_trained_model.ckpt")