Python tensorflow实现mnist手写数字识别示例【非卷积与卷积实现】
Python  /  管理员 发布于 7年前   209
本文实例讲述了Python tensorflow实现mnist手写数字识别。分享给大家供大家参考,具体如下:
非卷积实现
"""MNIST classification with a single softmax layer (no convolution).

Trains a 784 -> 10 linear + softmax model with a summed squared-error
loss and plain gradient descent, printing test-set accuracy every
50 training steps. (Original author reported ~92%-93% accuracy.)
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Raw string avoids accidental backslash escapes in the Windows path.
data_path = r'F:\CNN\data\mnist'
mnist_data = input_data.read_data_sets(data_path, one_hot=True)  # offline dataset

# None lets us feed any batch size of flattened 28x28 images.
x_data = tf.placeholder("float32", [None, 784])
weight = tf.Variable(tf.ones([784, 10]))
bias = tf.Variable(tf.ones([10]))
Y_model = tf.nn.softmax(tf.matmul(x_data, weight) + bias)

y_data = tf.placeholder("float32", [None, 10])
# Summed squared-error loss over the batch.
loss = tf.reduce_sum(tf.pow((y_data - Y_model), 2))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

# BUG FIX: build the evaluation ops ONCE, outside the training loop.
# The original recreated these nodes every 50 iterations, growing the
# graph unboundedly over 100000 steps. Also use tf.argmax — tf.arg_max
# is deprecated.
correct_predict = tf.equal(tf.argmax(Y_model, 1), tf.argmax(y_data, 1))
accurate = tf.reduce_mean(tf.cast(correct_predict, "float"))

init = tf.global_variables_initializer()
# Context manager guarantees the session's resources are released.
with tf.Session() as sess:
    sess.run(init)
    for i in range(100000):
        batch_xs, batch_ys = mnist_data.train.next_batch(50)
        sess.run(train, feed_dict={x_data: batch_xs, y_data: batch_ys})
        if i % 50 == 0:
            # Evaluate on the full test set.
            print(sess.run(accurate,
                           feed_dict={x_data: mnist_data.test.images,
                                      y_data: mnist_data.test.labels}))
卷积实现
"""MNIST classification with one conv + max-pool layer and two FC layers.

Architecture: 5x5 conv (32 maps) -> ReLU -> 2x2 max-pool ->
FC 14*14*32 -> 1024 -> ReLU -> FC 1024 -> 10 -> softmax.
Trains with cross-entropy loss and gradient descent, printing
test-set accuracy every 50 steps.
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Raw string avoids accidental backslash escapes in the Windows path.
data_path = r'F:\CNN\data\mnist'
mnist_data = input_data.read_data_sets(data_path, one_hot=True)  # offline dataset

# None lets us feed any batch size of flattened 28x28 images.
x_data = tf.placeholder("float32", [None, 784])
x_image = tf.reshape(x_data, [-1, 28, 28, 1])

# Conv layer: 5x5 kernel, 1 input channel, 32 feature maps.
w_conv = tf.Variable(tf.ones([5, 5, 1, 32]))  # weight
b_conv = tf.Variable(tf.ones([32]))           # bias
h_conv = tf.nn.relu(
    tf.nn.conv2d(x_image, w_conv, strides=[1, 1, 1, 1], padding='SAME') + b_conv)
# 2x2 max-pool halves the spatial dims: 28x28 -> 14x14.
h_pool = tf.nn.max_pool(h_conv, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')

# First fully-connected layer: 14*14*32 -> 1024.
w_fc1 = tf.Variable(tf.ones([14 * 14 * 32, 1024]))
b_fc1 = tf.Variable(tf.ones([1024]))
h_pool_flat = tf.reshape(h_pool, [-1, 14 * 14 * 32])
h_fc = tf.nn.relu(tf.matmul(h_pool_flat, w_fc1) + b_fc1)

# Output layer: 1024 -> 10.
# BUG FIX: the original wrote "W_fc = w_fc = tf.Variable(...)", silently
# rebinding the first FC layer's weight name to the output weights.
# Distinct names keep each layer's variables addressable.
w_fc2 = tf.Variable(tf.ones([1024, 10]))
b_fc2 = tf.Variable(tf.ones([10]))
Y_model = tf.nn.softmax(tf.matmul(h_fc, w_fc2) + b_fc2)

y_data = tf.placeholder("float32", [None, 10])
# Cross-entropy loss. BUG FIX: clip the softmax output so tf.log never
# sees 0 — otherwise a saturated prediction yields NaN and training dies.
loss = -tf.reduce_sum(y_data * tf.log(tf.clip_by_value(Y_model, 1e-10, 1.0)))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

# BUG FIX: build the evaluation ops ONCE, outside the training loop.
# The original recreated these nodes every 50 iterations, growing the
# graph unboundedly.
correct_prediction = tf.equal(tf.argmax(Y_model, 1), tf.argmax(y_data, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# tf.initialize_all_variables() is deprecated; use the modern initializer.
init = tf.global_variables_initializer()
# Context manager guarantees the session's resources are released.
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist_data.train.next_batch(5)
        sess.run(train_step, feed_dict={x_data: batch_xs, y_data: batch_ys})
        if i % 50 == 0:
            # Evaluate on the full test set.
            print(sess.run(accuracy,
                           feed_dict={x_data: mnist_data.test.images,
                                      y_data: mnist_data.test.labels}))
更多关于Python相关内容可查看本站专题:《Python数学运算技巧总结》、《Python图片操作技巧总结》、《Python数据结构与算法教程》、《Python函数使用技巧总结》、《Python字符串操作技巧汇总》及《Python入门与进阶经典教程》
希望本文所述对大家Python程序设计有所帮助。
122 在
学历:一种延缓就业设计,生活需求下的权衡之选中评论 工作几年后,报名考研了,到现在还没认真学习备考,迷茫中。作为一名北漂互联网打工人..123 在
Clash for Windows作者删库跑路了,github已404中评论 按理说只要你在国内,所有的流量进出都在监控范围内,不管你怎么隐藏也没用,想搞你分..原梓番博客 在
在Laravel框架中使用模型Model分表最简单的方法中评论 好久好久都没看友情链接申请了,今天刚看,已经添加。..博主 在
佛跳墙vpn软件不会用?上不了网?佛跳墙vpn常见问题以及解决办法中评论 @1111老铁这个不行了,可以看看近期评论的其他文章..1111 在
佛跳墙vpn软件不会用?上不了网?佛跳墙vpn常见问题以及解决办法中评论 网站不能打开,博主百忙中能否发个APP下载链接,佛跳墙或极光..
Copyright·© 2019 侯体宗版权所有·
粤ICP备20027696号