Personal notes; corrections are welcome.
Handwritten Digit Recognition
Reference: https://blog.csdn.net/qq_32241189/article/details/80450741
1. Import packages
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
2. Load the dataset
mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
#read_data_sets downloads the MNIST dataset; note one_hot=True here, so each label is a one-hot vector rather than a single value
#The dataset is split into train, validation, and test sets (see the sketch after this list):
1. mnist.train.num_examples returns the number of samples in the train set
2. mnist.validation.num_examples returns the number of samples in the validation set
3. mnist.test.num_examples returns the number of samples in the test set
4. mnist.train.images returns the pixel values of all images in the train set
5. mnist.train.labels returns the labels of all images in the train set
6. mnist.train.next_batch() feeds batches of data into the network
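A quick sketch (using the mnist object loaded above) to confirm what these accessors return; the printed sizes are the standard split produced by this loader:
print(mnist.train.num_examples)       # 55000
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000
print(mnist.train.images.shape)       # (55000, 784): each image flattened to 784 pixel values
print(mnist.train.labels.shape)       # (55000, 10): one-hot label vectors
batch_xs, batch_ys = mnist.train.next_batch(100)  # batch_xs: (100, 784), batch_ys: (100, 10)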
3. Define the batch size
batch_size = 100
4. Compute the number of batches
n_batch = mnist.train.num_examples // batch_size
5. Define a summary function for variables
def variables_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var) #reduce_mean() supports several kinds of mean reduction; see its definition
        tf.summary.scalar('mean',mean) #record a scalar so it shows up in TensorBoard
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean))) #compute the standard deviation
        tf.summary.scalar('stddev',stddev)
        tf.summary.scalar('max',tf.reduce_max(var)) #maximum value
        tf.summary.scalar('min',tf.reduce_min(var)) #minimum value
        tf.summary.histogram('histogram',var) #histogram: var is a tensor, so this must be tf.summary.histogram, not tf.summary.scalar
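For instance (a minimal usage sketch; w_demo is a hypothetical variable), calling the function once registers all five summaries for that variable:
w_demo = tf.Variable(tf.zeros([10]), name='w_demo')  # hypothetical demo variable
variables_summaries(w_demo)  # registers mean/stddev/max/min/histogram summaries under 'summaries'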
6. Define the weight-initialization function
def weight_variable(shape,name):
    initial = tf.truncated_normal(shape,stddev=0.1) #a tensor of the given shape drawn from a truncated normal distribution
    return tf.Variable(initial,name=name) #wrap initial as a Variable, so its values can be updated
#tf.truncated_normal(): The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked.
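A small sketch of the truncation in practice (the sampled values vary, but the 2-standard-deviation bound always holds):
demo_t = tf.truncated_normal([10000], mean=0.0, stddev=0.1)
with tf.Session() as sess:
    samples = sess.run(demo_t)
print(samples.min(), samples.max())  # both lie within [-0.2, 0.2], i.e. within 2 standard deviations of the mean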
7. Define the bias-initialization function
def bias_variable(shape,name):
    initial = tf.truncated_normal(shape=shape,stddev=0.1)
    return tf.Variable(initial,name=name)
8. Define the convolution function
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
#Note: with tf.nn, the kernel size is set by the shape of the convolution weights
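A shape sketch (demo_x and demo_W are hypothetical tensors): with strides of 1 and 'SAME' padding the spatial size is preserved, and the output channel count comes from the last dimension of the weights:
demo_x = tf.zeros([1, 28, 28, 1])    # [batch, height, width, in_channels]
demo_W = tf.zeros([5, 5, 1, 32])     # [filter_height, filter_width, in_channels, out_channels]
print(conv2d(demo_x, demo_W).shape)  # (1, 28, 28, 32)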
9. Define the pooling layer
def max_pool_2X2(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')
#Parameters of tf.nn.max_pool(value, ksize, strides, padding, name=None) (see the sketch after this list):
value: the input to pool; a pooling layer usually follows a convolution layer, so the input is typically a feature map, still with shape [batch, height, width, channels]
ksize: the size of the pooling window, a 4-element vector, usually [1, height, width, 1]; we do not want to pool over the batch or channels dimensions, so those two are set to 1
strides: as with convolution, the stride of the window along each dimension, usually [1, stride, stride, 1]
padding: as with convolution, either 'VALID' or 'SAME'; the result is a Tensor of the same type, still shaped [batch, height, width, channels]
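A shape sketch for the pooling function defined above (demo_in is a hypothetical tensor):
demo_in = tf.zeros([1, 28, 28, 32])
print(max_pool_2X2(demo_in).shape)  # (1, 14, 14, 32): height and width halved, batch and channels unchanged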
10. Define the inputs
with tf.name_scope('Input'):
    x = tf.placeholder(tf.float32,[None,784],name='x_input') #None means the batch dimension is left open: it becomes whatever number of samples is fed in
    y = tf.placeholder(tf.float32, [None, 10], name='y_input')
    with tf.name_scope('x_image'):
        x_image = tf.reshape(x,[-1,28,28,1]) #-1 means this dimension is computed automatically from the total size
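A sketch of how -1 is resolved (demo_flat is a hypothetical tensor):
demo_flat = tf.zeros([100, 784])
print(tf.reshape(demo_flat, [-1, 28, 28, 1]).shape)  # (100, 28, 28, 1): -1 becomes 100 because 100*784 = 100*28*28*1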
11. Initialize the convolution kernel weights
with tf.name_scope('Conv1'):
    with tf.name_scope('W_conv1'):
        W_conv1 = weight_variable([5,5,1,32],name='W_conv1') #[5,5,1,32]: 5x5 kernel, 1 input channel, 32 output channels
12. Initialize the bias (still inside the Conv1 scope)
    with tf.name_scope('b_conv1'):
        bias_conv1 = bias_variable([32],name='b_conv1')
13. Convolve x_image with the kernel and add the bias
    with tf.name_scope('conv2d_1'):
        conv2d_1 = conv2d(x_image,W_conv1) + bias_conv1
    with tf.name_scope('relu'):
        h_conv1 = tf.nn.relu(conv2d_1)
    with tf.name_scope('h_pool1'):
        h_pool1 = max_pool_2X2(h_conv1)
14. Second convolution layer
with tf.name_scope('Conv2'):
    with tf.name_scope('W_conv2'):
        W_conv2 = weight_variable([5,5,32,64],name='W_conv2')
    with tf.name_scope('b_conv2'):
        bias_conv2 = bias_variable([64],name='b_conv2')
    with tf.name_scope('conv2d_2'):
        conv2d_2 = conv2d(h_pool1,W_conv2) + bias_conv2
    with tf.name_scope('relu'):
        h_conv2 = tf.nn.relu(conv2d_2)
    with tf.name_scope('h_pool2'):
        h_pool2 = max_pool_2X2(h_conv2)
15. Fully connected layer 1
with tf.name_scope('fc1'):
    with tf.name_scope('W_fc1'):
        W_fc1 = weight_variable([7*7*64,1024],name='W_fc1')
    with tf.name_scope('b_fc1'):
        bias_fc1 = bias_variable([1024],name='b_fc1')
    with tf.name_scope('h_pool2_flat'):
        h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64],name='h_pool2_flat') #flatten the previous layer's output to serve as this layer's input
    with tf.name_scope('wx_plus_b1'):
        wx_plus_b1 = tf.matmul(h_pool2_flat,W_fc1) + bias_fc1
    with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(wx_plus_b1)
#Note: a flattening step is needed between the convolution layers and the fully connected layer, because a convolution layer outputs multi-channel 2-D (or higher-dimensional) data, while a fully connected layer only accepts 2-D data: each of its neurons takes a 1-D vector. A shape trace is sketched below.
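A sketch of the shape trace from input to the flattened tensor (batch dimension written as 100 for concreteness):
# x_image:      (100, 28, 28, 1)
# h_conv1:      (100, 28, 28, 32)   'SAME' convolution keeps 28x28
# h_pool1:      (100, 14, 14, 32)   2x2 pooling halves height and width
# h_conv2:      (100, 14, 14, 64)
# h_pool2:      (100, 7, 7, 64)
# h_pool2_flat: (100, 3136)         7*7*64 = 3136
print(h_pool2_flat.shape)  # (?, 3136): the batch dimension stays dynamic in the graph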
16. Dropout layer
with tf.name_scope('keep_prob'):
    keep_prob = tf.placeholder(tf.float32,name='keep_prob')
with tf.name_scope('h_fc1_drop'):
    h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob,name='h_fc1_drop')
#tf.nn.dropout(): input and output have the same shape; dropout is simply applied elementwise (kept elements are rescaled by 1/keep_prob)
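A sketch showing that dropout keeps the shape and rescales the kept elements by 1/keep_prob (which elements are zeroed is random):
demo_ones = tf.ones([2, 4])
with tf.Session() as sess:
    print(sess.run(tf.nn.dropout(demo_ones, 0.5)))  # still shape (2, 4); every element is 0.0 or 2.0 (= 1/0.5)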
17. Fully connected layer 2
with tf.name_scope('fc2'):
    with tf.name_scope('W_fc2'):
        W_fc2 = weight_variable([1024,10],name='W_fc2')
    with tf.name_scope('bias_fc2'):
        bias_fc2 = bias_variable([10],name='bias_fc2')
    with tf.name_scope('wx_plus_b2'):
        wx_plus_b2 = tf.matmul(h_fc1_drop,W_fc2) + bias_fc2
18. Softmax layer
with tf.name_scope('softmax'):
    prediction = tf.nn.softmax(wx_plus_b2)
#tf.nn.softmax(): input and output have the same shape; each element becomes a probability, and the elements of each row sum to 1.
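A sketch of the normalization (demo_logits is a hypothetical tensor):
demo_logits = tf.constant([[1.0, 2.0, 3.0]])
with tf.Session() as sess:
    p = sess.run(tf.nn.softmax(demo_logits))
print(p, p.sum())  # approximately [[0.09 0.245 0.665]]; the row sums to 1.0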
Note: optimizing a neural network means pushing each sample's final output toward that sample's label, so the label must be a numeric value or tensor.
19. Compute the cost function
#Define the cost function
with tf.name_scope('cross_entropy'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=wx_plus_b2),name='cross_entropy_') #pass the pre-softmax logits wx_plus_b2, not prediction: the function applies softmax internally, so feeding prediction would apply softmax twice
    tf.summary.scalar('cross_entropy',cross_entropy)
#tf.nn.softmax_cross_entropy_with_logits(labels,logits) computes the softmax cross entropy; logits must be the raw, pre-softmax scores
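A sketch verifying the definition: for each row the function computes -sum(labels * log(softmax(logits))) (demo_labels and demo_logits are hypothetical):
demo_labels = tf.constant([[0.0, 0.0, 1.0]])
demo_logits = tf.constant([[1.0, 2.0, 3.0]])
ce = tf.nn.softmax_cross_entropy_with_logits(labels=demo_labels, logits=demo_logits)
manual = -tf.reduce_sum(demo_labels * tf.log(tf.nn.softmax(demo_logits)), axis=1)
with tf.Session() as sess:
    print(sess.run([ce, manual]))  # both approximately [0.4076]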
20. Optimize with AdamOptimizer
with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
21. Compute the accuracy
with tf.name_scope('accuracy'):
    #put the results into a list of booleans
    correct_prediction = tf.equal(tf.argmax(prediction,1),tf.argmax(y,1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) #cast() converts the dtype, here booleans to float32 so the mean can be taken
        tf.summary.scalar('accuracy',accuracy)
#tf.argmax(prediction,1): with axis=1 the reduction runs over the second dimension, i.e. it takes the maximum of each row and returns the index of that maximum
#tf.equal(x,y): elementwise equality test between x and y, returning a tensor of booleans (a small sketch follows)
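A small sketch of the accuracy computation on two hypothetical samples:
demo_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
demo_label = tf.constant([[0.0, 1.0], [0.0, 1.0]])
demo_correct = tf.equal(tf.argmax(demo_pred, 1), tf.argmax(demo_label, 1))
with tf.Session() as sess:
    print(sess.run(demo_correct))                                       # [ True False]
    print(sess.run(tf.reduce_mean(tf.cast(demo_correct, tf.float32))))  # 0.5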
22. Merge all summaries
merged = tf.summary.merge_all()
23. Session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('logs/train',sess.graph) #write sess.graph to the log directory
    test_writer = tf.summary.FileWriter('logs/test',sess.graph)
    for i in range(1001):
        #train the model
        batch_xs, batch_ys = mnist.train.next_batch(batch_size) #return the next `batch_size` examples from this data set
        sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys,keep_prob:0.5})
        #record the summaries computed on the training batch; the result must be written to train_writer, otherwise the run is wasted
        summary = sess.run(merged,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})
        train_writer.add_summary(summary,i)
        #record the summaries computed on a test batch
        batch_xs, batch_ys = mnist.test.next_batch(batch_size)
        summary = sess.run(merged,feed_dict={x:batch_xs,y:batch_ys,keep_prob:1.0})
        test_writer.add_summary(summary,i)
        if i%100 == 0:
            test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0})
            train_acc = sess.run(accuracy,feed_dict={x: mnist.train.images[:10000], y: mnist.train.labels[:10000], keep_prob: 1.0})
            print('iter ' + str(i) + ', Testing Accuracy=' + str(test_acc) + ', Training Accuracy=' + str(train_acc))
24. Steps for building a network
1. Initialize the inputs
2. Initialize the weight and bias parameters
3. Build each layer
4. Define the cost function
5. Optimize the cost function
6. Write the Session
Source: CSDN
Author: weiyu_CHN
Link: https://blog.csdn.net/gzy_wyu/article/details/104275708