TensorFlow Multilayer Perceptron
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Network shape: 784 input pixels -> 300 hidden units -> 10 classes.
in_units = 784
h1_units = 300
W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros([h1_units]))
W2 = tf.Variable(tf.zeros([h1_units, 10]))
b2 = tf.Variable(tf.zeros([10]))

x = tf.placeholder(tf.float32, [None, in_units])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

# One hidden layer with ReLU and dropout, then a softmax output layer.
hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)
y_ = tf.placeholder(tf.float32, [None, 10])

# Cross-entropy loss, minimized with plain gradient descent (lr = 0.3).
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(3000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, {x: batch_xs, y_: batch_ys, keep_prob: 0.75})
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # The original fed keep_prob: 0.75 here as well; dropout must be disabled
    # (keep_prob: 1.0) at evaluation time, or test accuracy is understated.
    print(sess.run(accuracy,
                   {x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
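One caveat worth noting: taking tf.log(y) on the softmax output can blow up when a predicted probability underflows to zero. A more numerically stable variant, sketched here under the assumption that the same graph variables (hidden1_drop, W2, b2, y_) are in scope, computes the loss directly from the logits:

# Numerically stable alternative: feed logits, not softmax probabilities.
logits = tf.matmul(hidden1_drop, W2) + b2
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))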
This example is fairly simple: a network with a single hidden layer, trained with dropout and a stochastic gradient descent optimizer for 3,000 iterations. The reported results are not great; one likely reason is that the original code also fed keep_prob: 0.75 when computing test accuracy, whereas dropout should be disabled (keep_prob: 1.0) at evaluation time, as corrected in the listing above.
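For readers on TensorFlow 2.x, where tensorflow.examples.tutorials and the session-based API above are no longer available, here is a minimal sketch of the same architecture (784-300-10 with ReLU, dropout, and SGD) using tf.keras. The layer sizes, dropout rate, and learning rate mirror the listing above; the Keras-specific calls are my own substitution, not part of the original post.

import tensorflow as tf  # assumes TensorFlow 2.x

# Load MNIST via keras.datasets (replaces the deprecated tutorials reader).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Same topology as above: 784 -> 300 (ReLU) -> dropout -> 10 (softmax).
model = tf.keras.Sequential([
    tf.keras.layers.Dense(300, activation="relu", input_shape=(784,)),
    tf.keras.layers.Dropout(0.25),  # drop rate = 1 - keep_prob of 0.75
    tf.keras.layers.Dense(10, activation="softmax"),
])

model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.3),
    loss="sparse_categorical_crossentropy",  # labels stay as integers here
    metrics=["accuracy"],
)

# 3000 steps of batch size 100 equals 5 epochs over 60,000 training images.
model.fit(x_train, y_train, batch_size=100, epochs=5)
print(model.evaluate(x_test, y_test))  # Keras disables dropout at evaluation

Note that model.evaluate handles the keep_prob pitfall automatically: Keras layers run in inference mode during evaluation, so dropout is switched off without any manual flag.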