1.对于残差神经网络的shortcut结构的理解有两种,看大家的代码全都是先使用tf.nn.conv2d()函数将输入与1*1的卷积核进行卷积,生成通道数与输出通道数相同的特征图,将这张特征图与经过三个卷积层特征图进行点对点相加,这样就将初级特征前馈到了后面,很好的保留了初级特征。
代码如下:

# -*- coding: utf-8 -*-
# 这里使用结合SEnet、inception结构和shortcut结构的卷积神经网络,所以将784reshape成28*28的矩阵
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# 设置权重函数
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# 设置阈值函数
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 设置卷积层
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding = "SAME")

# 设置池化层
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([1,1,channel_in,channel_out/4])
        res_b_conv1 = bias_variable([channel_out/4])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/4,channel_out/2])
        res_b_conv2 = bias_variable([channel_out/2])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2) + res_b_conv2)

    with tf.variable_scope("res_sub3"):
        res_w_conv3 = weight_variable([1,1,channel_out/2,channel_out])
        res_b_conv3 = bias_variable([channel_out])
        res_h_conv3 = tf.nn.relu(conv2d(res_h_conv2,res_w_conv3) + res_b_conv3)

    with tf.variable_scope("shortcut"):
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut) + res_b_shortcut
        res_add = tf.add(res_h_conv3, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result


if __name__ == "__main__":
    # 设置输入
    x = tf.placeholder(tf.float32,[None, 784])
    # 将其变为二维的图片,-1为x的第一维数量
    x_image = tf.reshape(x,[-1,28,28,1])

    #配置第一个残差块
    h_res1 = res_block(x_image,3,3,1,32)
    h_res1 = pool(h_res1)
    h_res1 = tf.nn.lrn(h_res1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置第二个残差块
    #h_res2 = res_block(h_res1, 3, 3, 32, 64)
    #h_res2 = tf.nn.lrn(h_res2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    #配置第三个残差块
    h_res2 = res_block(h_res1, 3, 3, 32, 64)
    h_res2 = pool(h_res2)
    h_res2 = tf.nn.lrn(h_res2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置全连接层1
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_res2, shape=[-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # 配置dropout层
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # 配置全连接层2
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # 接受y_label
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()

    # 交叉熵
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # 梯度下降法
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # 求准确率
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # 精确度计算
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # 训练100次,验证一次
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y %g' % (i, train_acc)
            train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

test accuracy:0.8479999899864197
2.换一种思路,直接将输入与经过三个卷积层的特征图进行通道维度上的拼接,使用concat的结果
代码如下:

# -*- coding: utf-8 -*-
# 这里使用结合SEnet、inception结构和shortcut结构的卷积神经网络,所以将784reshape成28*28的矩阵
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# 设置权重函数
def weight_variable(shape):
    # 权重正则化
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# 设置阈值函数
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 设置卷积层
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding = "SAME")

# 设置池化层
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x
	# 第一个卷积层
    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([1,1,channel_in,channel_out/4])
        res_b_conv1 = bias_variable([channel_out/4])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1) + res_b_conv1)
	# 第二个卷积层
    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/4,channel_out/2])
        res_b_conv2 = bias_variable([channel_out/2])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2) + res_b_conv2)
	# 第三个卷积层
    with tf.variable_scope("res_sub3"):
        res_w_conv3 = weight_variable([1,1,channel_out/2,channel_out])
        res_b_conv3 = bias_variable([channel_out])
        res_h_conv3 = tf.nn.relu(conv2d(res_h_conv2,res_w_conv3) + res_b_conv3)
	# shortcut结构 
    with tf.variable_scope("shortcut"):
        # res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        # res_b_shortcut = bias_variable([channel_out])
        # X_shortcut = conv2d(X_shortcut,res_w_shortcut) + res_b_shortcut
        res_concat = tf.concat(3,[res_h_conv3, X_shortcut])
        res_add_result = tf.nn.relu(res_concat)

    channel = channel_out + channel_in
    return res_add_result,channel


if __name__ == "__main__":
    # 设置输入
    x = tf.placeholder(tf.float32,[None, 784])
    # 将其变为二维的图片,-1为x的第一维数量
    x_image = tf.reshape(x,[-1,28,28,1])

    #配置第一个残差块
    h_res1,channel1 = res_block(x_image,5,5,1,32)
    h_res1 = pool(h_res1)
    h_res1 = tf.nn.lrn(h_res1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置第二个残差块
    h_res2,channel2 = res_block(h_res1, 3, 3, channel1, 64)
    h_res2 = tf.nn.lrn(h_res2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    #配置第三个残差块
    h_res3,channel3 = res_block(h_res2, 3, 3, channel2, 128)
    h_res3 = pool(h_res3)
    h_res3 = tf.nn.lrn(h_res3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置全连接层1
    W_fc1 = weight_variable([7 * 7 * channel3, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_res3, shape=[-1, 7 * 7 * channel3])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # 配置dropout层
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # 配置全连接层2
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # 接受y_label
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()

    # 交叉熵
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # 梯度下降法
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # 求准确率
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # 精确度计算
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # 训练100次,验证一次
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y %g' % (i, train_acc)
            train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

准确率:
test accuracy:0.9180000007152558

3.将残差块里的卷积层变为2个3*3的卷积核,第一种:

# -*- coding: utf-8 -*-
# 这里使用结合SEnet、inception结构和shortcut结构的卷积神经网络,所以将784reshape成28*28的矩阵
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def get_data():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    return mnist

# 设置权重函数
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial)

# 设置阈值函数
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# 设置卷积层
def conv2d(x,W):
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding = "SAME")

# 设置池化层
def pool(x):
    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides = [1,2,2,1],padding = "SAME")

def res_block(x,kernal_size_x,kernal_size_y,channel_in,channel_out):

    X_shortcut = x

    with tf.variable_scope("res_sub1"):
        res_w_conv1 = weight_variable([kernal_size_x,kernal_size_y,channel_in,channel_out/2])
        res_b_conv1 = bias_variable([channel_out/2])
        res_h_conv1 = tf.nn.relu(conv2d(x,res_w_conv1) + res_b_conv1)

    with tf.variable_scope("res_sub2"):
        res_w_conv2 = weight_variable([kernal_size_x,kernal_size_y,channel_out/2,channel_out])
        res_b_conv2 = bias_variable([channel_out])
        res_h_conv2 = tf.nn.relu(conv2d(res_h_conv1,res_w_conv2) + res_b_conv2)

    with tf.variable_scope("shortcut"):
        res_w_shortcut = weight_variable([1, 1, channel_in, channel_out])
        res_b_shortcut = bias_variable([channel_out])
        X_shortcut = conv2d(X_shortcut,res_w_shortcut) + res_b_shortcut
        res_add = tf.add(res_h_conv2, X_shortcut)
        res_b_shortcut = bias_variable([channel_out])
        res_add_result = tf.nn.relu(res_add + res_b_shortcut)

    return res_add_result


if __name__ == "__main__":
    # 设置输入
    x = tf.placeholder(tf.float32,[None, 784])
    # 将其变为二维的图片,-1为x的第一维数量
    x_image = tf.reshape(x,[-1,28,28,1])

    #配置第一个残差块
    h_res1 = res_block(x_image,3,3,1,32)
    h_res1 = pool(h_res1)
    h_res1 = tf.nn.lrn(h_res1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置第二个残差块
    #h_res2 = res_block(h_res1, 3, 3, 32, 64)
    #h_res2 = tf.nn.lrn(h_res2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    #配置第三个残差块
    h_res2 = res_block(h_res1, 3, 3, 32, 64)
    h_res2 = pool(h_res2)
    h_res2 = tf.nn.lrn(h_res2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    # 配置全连接层1
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h2_pool_flat = tf.reshape(h_res2, shape=[-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)

    # 配置dropout层
    keep_prob = tf.placeholder("float")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # 配置全连接层2
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_predict = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    # 接受y_label
    y_label = tf.placeholder(tf.float32,[None,10])
    mnist = get_data()

    # 交叉熵
    cross_entropy = -tf.reduce_sum(y_label * tf.log(y_predict))
    # 梯度下降法
    train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)
    # train_step = tf.train.GradientDescentOptimizer(1e-3).minimize(cross_entropy)
    # 求准确率
    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))  # 精确度计算
    sess = tf.InteractiveSession()
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:  # 训练100次,验证一次
            train_acc = accuracy.eval(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 1.0})
            print 'step %d, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y %g' % (i, train_acc)
            train_step.run(feed_dict={x: batch[0], y_label: batch[1], keep_prob: 0.5})

    point = 0
    for i in xrange(10):
        testSet = mnist.test.next_batch(50)
        point += accuracy.eval(feed_dict={ x: testSet[0], y_label: testSet[1], keep_prob: 1.0})
    print "test accuracy:"+str(point/10)

准确率:

step 19300, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.98
step 19400, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.96
step 19500, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 1
step 19600, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.94
step 19700, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.9
step 19800, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.92
step 19900, training accuracsess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys,keep_prob: 0.5})y 0.86
test accuracy:0.9439999997615814

Process finished with exit code 0