TensorFlow: loading a pre-trained resnet_v2_50 and fine-tuning it with custom model layers


0. Pre-trained model: resnet_v2_50

(1) Overview

A quick summary of ResNet v1 versus v2, starting from the block-structure comparison that Kaiming He gives in the ResNet v2 paper (the figure is not reproduced here):

Figure (a) is the ResNet v1 unit and figure (e) is the ResNet v2 unit ("weight" denotes a conv layer); the left branch is the identity branch and the right branch is the residual branch.

The v1 unit in figure (a) has two characteristics: 1. each weight layer follows the conv-BN-ReLU order; 2. the final ReLU sits after the addition. The v2 unit in figure (e) instead uses pre-activation (BN-ReLU-conv) and keeps the addition on a clean identity path, with no ReLU after it.
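To make the two orderings concrete, here is a minimal TF 1.x sketch of a simplified (non-bottleneck) residual unit in each style. It illustrates the ordering only; it is not the slim implementation:

import tensorflow as tf

def v1_unit(x, filters):
    """ResNet v1 ordering: conv-BN-ReLU in the branch, ReLU after the addition."""
    h = tf.layers.conv2d(x, filters, 3, padding='same')
    h = tf.layers.batch_normalization(h)
    h = tf.nn.relu(h)
    h = tf.layers.conv2d(h, filters, 3, padding='same')
    h = tf.layers.batch_normalization(h)
    return tf.nn.relu(x + h)          # final ReLU sits after the addition

def v2_unit(x, filters):
    """ResNet v2 (pre-activation) ordering: BN-ReLU-conv, identity after the addition."""
    h = tf.layers.batch_normalization(x)
    h = tf.nn.relu(h)
    h = tf.layers.conv2d(h, filters, 3, padding='same')
    h = tf.layers.batch_normalization(h)
    h = tf.nn.relu(h)
    h = tf.layers.conv2d(h, filters, 3, padding='same')
    return x + h                      # clean identity path, no ReLU here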

(2) Code

The concrete implementation lives in the slim model zoo of the official TensorFlow models repository:

https://github.com/tensorflow/models/tree/master/research/slim/nets


(3) Import

from tensorflow.contrib.slim.nets import resnet_v2
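For reference, slim's model zoo normally builds the network inside resnet_arg_scope, which supplies the batch-norm and weight-decay defaults. A minimal sketch (the training scripts below call resnet_v2_50 directly, without the arg scope):

import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v2

slim = tf.contrib.slim

images = tf.placeholder(tf.float32, (None, 64, 64, 3))
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
    # num_classes=None keeps the pooled features instead of adding a logits layer
    net, endpoints = resnet_v2.resnet_v2_50(images, num_classes=None, is_training=False)
# with the default global pooling, net has shape (batch, 1, 1, 2048)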

(4) Downloading the pre-trained checkpoint

Official download page: https://github.com/tensorflow/models/tree/master/research/slim#Pretrained
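Once the checkpoint is unpacked, it can be useful to list the tensors it contains, since variable names must match when restoring later. A quick sketch, with the path as a placeholder:

from tensorflow.python import pywrap_tensorflow

# Path is a placeholder; point it at the unpacked resnet_v2_50.ckpt
reader = pywrap_tensorflow.NewCheckpointReader('resnet_v2_50_2017_04_14/resnet_v2_50.ckpt')
shape_map = reader.get_variable_to_shape_map()
for name in sorted(shape_map)[:5]:     # print a few entries
    print(name, shape_map[name])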

1. The hand-gesture dataset

(1) Overview


The inputs are hand-gesture images like the one shown above (the sample image is not reproduced here), with 6 classes in total. The data has already been preprocessed and is stored in '.h5' format: 1080 training images and 120 test images, each a 64x64 RGB image. The exact layout:

number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
x train max,  0.956; x train min,  0.015
x test max,  0.94; x test min,  0.011
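These numbers are easy to verify once the files are downloaded (link below). A quick h5py check, assuming the files sit in ./datasets:

import h5py

with h5py.File('./datasets/train_signs.h5', 'r') as f:
    print(list(f.keys()))              # expected keys include 'train_set_x', 'train_set_y', 'list_classes'
    print(f['train_set_x'].shape)      # (1080, 64, 64, 3)
    print(f['train_set_y'].shape)      # (1080,)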

(2) Download link

Link: https://pan.baidu.com/s/1iA004kLU1gocvA-gaiwSWw
Extraction code: sqj3

After downloading, the folder contains train_signs.h5 and test_signs.h5 (the directory screenshot is not reproduced here).

(3) Usage

This includes the data-loading helpers needed during training; the code below goes into a utils.py file:

import math

import h5py
import numpy as np


def load_dataset(path):
    train_dataset = h5py.File(path+'/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File(path+'/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def process_orig_datasets(datasets):
    """
    normalize x_train and convert y_train to one hot.
    :param datasets: X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes
    :return: X_train, Y_train, X_test, Y_test
    """
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = datasets

    # Normalize image vectors
    X_train = X_train_orig / 255.
    X_test = X_test_orig / 255.

    # Convert training and test labels to one hot matrices
    Y_train = convert_to_one_hot(Y_train_orig, 6).T
    Y_test = convert_to_one_hot(Y_test_orig, 6).T
    return X_train, Y_train, X_test, Y_test


def random_mini_batches(X, Y, mini_batch_size=64, seed=None):
    """
    Creates a list of random minibatches from (X, Y)
    Arguments:
    X -- input data, of shape (input size, number of examples) (m, Hi, Wi, Ci)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples) (m, n_y)
    mini_batch_size - size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that you're "random minibatches are the same as ours.
    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(
        m / mini_batch_size)  # number of mini-batches of size mini_batch_size in the partition
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches


def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y
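
A quick smoke test for these helpers (the dataset path is a placeholder):

from utils import load_dataset, process_orig_datasets, random_mini_batches

orig_data = load_dataset('./datasets')                      # placeholder path
X_train, Y_train, X_test, Y_test = process_orig_datasets(orig_data)
print(X_train.shape, Y_train.shape)                         # (1080, 64, 64, 3) (1080, 6)

mini_batches = random_mini_batches(X_train, Y_train, mini_batch_size=32, seed=0)
print(len(mini_batches), mini_batches[0][0].shape)          # 34 batches; the first is (32, 64, 64, 3)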

2. Load the pre-trained resnet_v2_50, restore the matching weights, and retrain the entire network (fine-tuning)

(1) Training code

import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v2
from tensorflow.python import pywrap_tensorflow
from utils import *

slim = tf.contrib.slim

tf.reset_default_graph()
data_dir = 'E:/Tfexercise/Resnet-Finetune/datasets'            # folder with the gesture .h5 files
model_save_path = 'E:/Tfexercise/Resnet-Finetune/save_model/'  # where the fine-tuned model is saved


# Load the gesture data (helpers defined in utils.py)
orig_data = load_dataset(data_dir)
X_train, Y_train, X_test, Y_test = process_orig_datasets(orig_data)
mini_batches = random_mini_batches(X_train, Y_train, mini_batch_size=32, seed=None)

# Path to the pre-trained checkpoint
model_path = "E:/Tfexercise/Resnet-Finetune/resnet_v2_50_2017_04_14/resnet_v2_50.ckpt"

images = tf.placeholder(tf.float32, (None, 64, 64, 3), 'input_images')
labels = tf.placeholder(tf.float32, [None, 6])  # one-hot labels

net, endpoints = resnet_v2.resnet_v2_50(images, is_training=True)

# Custom layers added on top, under the variable scope 'finetune'
with tf.variable_scope('finetune'):
    flatten = tf.layers.flatten(net)
    x = tf.layers.dense(flatten, units=50, activation=tf.nn.relu)
    keep_prob = tf.placeholder(tf.float32)
    x = tf.nn.dropout(x, keep_prob)
    # No activation here: tf.losses.softmax_cross_entropy expects raw logits
    logits = tf.layers.dense(x, units=6, activation=None)
    y = tf.nn.softmax(logits)
    cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
    cross_entropy_cost = tf.reduce_mean(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_cost)  # trains both resnet_v2_50 and the custom layers
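# Editor's note (a hedged sketch, not part of the original script): resnet_v2_50
# contains batch-norm layers whose moving averages are updated through
# tf.GraphKeys.UPDATE_OPS. With is_training=True, the usual TF1 pattern couples
# those updates to the train op:
#   update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#   with tf.control_dependencies(update_ops):
#       train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_cost)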


'''
Train: the fine-tuning loop over the training data
'''
saver = tf.train.Saver()
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
with tf.Session() as sess:

    sess.run(tf.global_variables_initializer())

    # Match the graph's trainable variables against tensors stored in the
    # checkpoint; only names that exist there can be restored, so everything
    # under 'finetune' keeps its random initialization. (Note: batch-norm
    # moving means/variances are not in tf.trainable_variables(), so this
    # matching does not restore them.)
    var_list = tf.trainable_variables()
    if isinstance(var_list, dict):
        var_dict = var_list
    else:
        var_dict = {var.op.name: var for var in var_list}
    available_vars = {}
    for name in var_dict:
        if reader.has_tensor(name):
            available_vars[name] = var_dict[name]
    var_list = available_vars
    if var_list:
        saver_restore = tf.train.Saver(var_list)        # restores only the matched ResNet weights
        saver = tf.train.Saver(tf.global_variables())   # saves the full fine-tuned model later
        saver_restore.restore(sess, model_path)
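        # A more compact alternative exists in slim (a sketch, not what this
        # script uses): restore everything except the 'finetune' scope and
        # ignore names missing from the checkpoint:
        #   variables_to_restore = slim.get_variables_to_restore(exclude=['finetune'])
        #   init_fn = slim.assign_from_checkpoint_fn(model_path, variables_to_restore,
        #                                            ignore_missing_vars=True)
        #   init_fn(sess)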
    for i in range(10000):
        X_mini_batch, Y_mini_batch = mini_batches[np.random.randint(0, len(mini_batches))]
        train_step.run(feed_dict={images: X_mini_batch, labels: Y_mini_batch, keep_prob: 0.5})

        if i % 20 == 0:
            train_accuracy = sess.run(accuracy, feed_dict={images: X_mini_batch,
                                                           labels: Y_mini_batch, keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        if (i + 1) % 100 == 0:
            saver.save(sess, model_save_path, global_step=i + 1)
            print('save model to {}'.format(model_save_path))

'''
Evaluate: run the test data (uncomment this block and comment out the training block)
'''
# saver = tf.train.Saver(tf.global_variables())
# with tf.Session() as sess2:
#     sess2.run(tf.global_variables_initializer())
#     # Restore the full fine-tuned model saved during training
#     saver.restore(sess2, tf.train.latest_checkpoint(model_save_path))
#     accu = sess2.run(accuracy, feed_dict={images: X_test, labels: Y_test, keep_prob: 1.0})
#     print('accuracy %g' % accu)

Partial training output (the screenshot is not reproduced here).

(2) Test code

Just comment out the "Train" block above and uncomment the "Evaluate" block.

Final test results (screenshot not reproduced here).

3. Load the pre-trained resnet_v2_50, restore the matching weights, and have the optimizer train only the newly added layers (a specified scope's variables)

(1) Training code

The script is identical to the training code in Section 2 except for how the optimizer is constructed. Instead of letting minimize() update every trainable variable, we pass an explicit var_list that contains only the variables under the 'finetune' scope, so the restored ResNet weights stay frozen:

'''
Changed part
'''
# Key step: collect only the variables that live under the 'finetune' scope
finetune_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='finetune')
print(finetune_vars)
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_cost, var_list=finetune_vars)

Everything else, including the data loading, network construction, checkpoint restoration, and the training and evaluation loops, is unchanged from Section 2.
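To confirm that the ResNet weights really stay frozen, here is a hedged sanity check (not part of the original post) that can be dropped inside the training loop; it reuses the script's names (sess, images, labels, keep_prob, the current mini-batch) and snapshots one ResNet variable and one 'finetune' variable across a train step:

# Hedged sanity check: verify which variables actually move during a step
resnet_var = [v for v in tf.global_variables() if v.op.name.startswith('resnet_v2_50')][0]
new_var = finetune_vars[0]
before = sess.run([resnet_var, new_var])
train_step.run(feed_dict={images: X_mini_batch, labels: Y_mini_batch, keep_prob: 0.5})
after = sess.run([resnet_var, new_var])
print(np.allclose(before[0], after[0]))   # True: the ResNet variable did not change
print(np.allclose(before[1], after[1]))   # False: the new layer's variable was updated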

Final training output (screenshot not reproduced here).

(2) Test code

As in Section 2, comment out the "Train" block and uncomment the "Evaluate" block.

Final test results (screenshot not reproduced here).

4. Comparison

Comparing the fine-tuning strategies of Sections 2 and 3 on this dataset: with the same number of epochs, loading the pre-trained resnet_v2_50 and then retraining all variables, ResNet included, reaches better accuracy than training only the newly added layers.
