TensorFlow 2 / Keras: training and testing a classifier with the official built-in models

Submitted by 南笙酒味 on 2020-03-30 22:03:16


The task is an 8-class classification over the combined gender and age attributes:
dict_gender = {'f': 0,
               'm': 1}
dict_age = {'children': 0,
            'young': 1,
            'adult': 2,
            'older': 3}
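The 8 classes are simply the cross product of the two attributes. A minimal sketch of how the combined class names used below (f_children ... m_older) can be generated; the loop is illustrative only and relies on Python >= 3.7 preserving dict insertion order:

class_names = ['%s_%s' % (g, a) for g in dict_gender for a in dict_age]
print(class_names)
# ['f_children', 'f_young', 'f_adult', 'f_older',
#  'm_children', 'm_young', 'm_adult', 'm_older']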

Data loading uses Keras's datagen_train.flow_from_directory:

train_generator=datagen_train.flow_from_directory('/home/nfs/em1/train_data/age_gender/train-dir', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

The train-dir folder contains 8 subfolders, one per class; labels are generated automatically from the folder names.
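For reference, the expected layout (folder names are the combined class names; the image file names themselves are arbitrary):

train-dir/
    f_adult/      *.jpg
    f_children/   *.jpg
    f_older/      *.jpg
    f_young/      *.jpg
    m_adult/      *.jpg
    m_children/   *.jpg
    m_older/      *.jpg
    m_young/      *.jpg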

print(train_generator.class_indices)
print(valid_generator.class_indices)

These display the class-to-index mappings.
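flow_from_directory assigns indices by sorting the class folder names alphabetically, so both prints produce the same mapping that appears as own_label in the test scripts below:

{'f_adult': 0, 'f_children': 1, 'f_older': 2, 'f_young': 3, 'm_adult': 4, 'm_children': 5, 'm_older': 6, 'm_young': 7}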

ResNet50

import os,sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from PIL import Image
import random

from tensorflow.keras.callbacks import ModelCheckpoint



print(tf.__version__)
print(sys.version_info)

HEIGHT = 224
WIDTH = 224

BATCH_SIZE = 64

SIZE = HEIGHT

NUM_TRAIN = 342155
NUM_VAL = 1981


model = ResNet50(
    weights=None, # weights='imagenet', or a path to your own pretrained weights
    classes=8
)

model.compile(optimizer=tf.keras.optimizers.Adam(0.001), # tf.train.AdamOptimizer is TF1-only
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print(model.summary())

#model.load_weights( "my_net.hdf5" ) # load previously trained weights to resume training

datagen_train = ImageDataGenerator(
        rescale=1./255.0,
        rotation_range=1.5,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')

datagen_val = ImageDataGenerator(
        rescale=1./255.0)


train_generator=datagen_train.flow_from_directory('/home/nfs/em1/yanghailin_em1/train_data/age_gender/my_aug_chengguang/train', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

valid_generator=datagen_val.flow_from_directory('/home/nfs/em1/yanghailin_em1/train_data/age_gender/my_aug_chengguang/test', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

print(train_generator.class_indices)
print(valid_generator.class_indices)


epochs = 10000
filepath = "./model/resnet50-keras_model_{epoch:03d}-{val_acc:.4f}.h5" #避免文件名称重复
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, mode='max')
history = model.fit(train_generator,  # model.fit accepts generators in TF >= 2.1; fit_generator is deprecated
                    steps_per_epoch=NUM_TRAIN // BATCH_SIZE,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=NUM_VAL // BATCH_SIZE,
                    verbose=1, callbacks=[checkpoint])
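matplotlib is imported above but never used; as a minimal sketch, the curves recorded in history can be plotted like this (assuming the TF >= 2.x metric keys 'accuracy' / 'val_accuracy'; the output file name is hypothetical):

plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('resnet50_training_curves.png')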

ResNet50 test code

import os,sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from PIL import Image
import random

from tensorflow.keras.callbacks import ModelCheckpoint



print(tf.__version__)
print(sys.version_info)

HEIGHT = 224
WIDTH = 224

BATCH_SIZE = 64

SIZE = HEIGHT

NUM_TRAIN = 342155
NUM_VAL = 1981

model = ResNet50(
    weights=None,
    classes=8
)

model.compile(optimizer=tf.keras.optimizers.Adam(0.001), # tf.train.AdamOptimizer is TF1-only
             loss='categorical_crossentropy',
             metrics=['accuracy'])

print(model.summary())

model.load_weights("/resnet50/fuwuqi/model/resnet50-keras_model_017-0.8536.h5")


# 'tijiao' = competition submission: class name -> submission label id
dict_label_tijiao = {"f_children":"0",
              "f_young":"1",
              "f_adult":"2",
              "f_older":"3",
              "m_children":"4",
              "m_young":"5",
              "m_adult":"6",
              "m_older":"7"
              }

own_label = {'f_adult': 0, 'f_children': 1, 'f_older': 2, 'f_young': 3, 'm_adult': 4, 'm_children': 5, 'm_older': 6, 'm_young': 7}
map_own_label2tijiao = {0:'f_adult',1:'f_children',2:'f_older',3:'f_young',4:'m_adult',5:'m_children',6:'m_older',7:'m_young'}



root_dir_test =  "/data_2/big-data/compete/20200323/src_data/test-tijiao/"
with open(root_dir_test + 'result.txt','w')as fw:
    for root, dirs, files in os.walk(root_dir_test):
        if 0 == len(files):
            continue
        for img_name_ in files:
            print(img_name_)
            if img_name_.endswith((".jpg", ".jpeg", ".png")):
                name = os.path.splitext(img_name_)[0]  # filename without extension
                img_path = os.path.join(root, img_name_)
                img = image.load_img(img_path, target_size=(SIZE, SIZE))
                img = image.img_to_array(img) / 255.0  # same rescaling as training
                img = np.expand_dims(img, axis=0)  # add the batch dimension
                predictions = model.predict(img)
                label = np.argmax(predictions, axis=1)
                print(predictions)
                print("label=", label)
                print('*' * 100)
                # map the predicted index back to a class name, then to the submission id,
                # and write the result (as in the later test scripts; fw was unused here)
                label_describe = map_own_label2tijiao[label[0]]
                label_tijiao_val = dict_label_tijiao[label_describe]
                fw.write(name + " " + label_tijiao_val + '\n')
                # cv2.imshow("img", img[0][:, :, ::-1])
                # cv2.waitKey(0)

InceptionResNetV2

import os
import sys
import tensorflow as tf
import time
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda

from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint

from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.optimizers import SGD


print(tf.__version__)
print(sys.version_info)


HEIGHT = 160
WIDTH = 160

BATCH_SIZE = 20

SIZE = HEIGHT

NUM_TRAIN = 342155
NUM_VAL = 1981


nb_classes = 8

model = tf.keras.applications.InceptionResNetV2(weights=None,classes=nb_classes,input_shape=(HEIGHT, WIDTH, 3))


model.compile(
  optimizer=tf.keras.optimizers.RMSprop(),
  loss='categorical_crossentropy',
  metrics=['accuracy'])


model.summary()

datagen_train = ImageDataGenerator(
        rescale=1./255.0,
        rotation_range=1.5,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')

datagen_val = ImageDataGenerator(
        rescale=1./255.0)


train_generator=datagen_train.flow_from_directory('/home/train_data/age_gender/my_aug_chengguang/train', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

valid_generator=datagen_val.flow_from_directory('/home/train_data/age_gender/my_aug_chengguang/test', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

print(train_generator.class_indices)
print(valid_generator.class_indices)


epochs = 10000
filepath = "./model/inception-resnet-model_{epoch:03d}-{val_acc:.4f}.h5" #避免文件名称重复
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, mode='max')
history = model.fit(train_generator,  # model.fit accepts generators in TF >= 2.1; fit_generator is deprecated
                    steps_per_epoch=NUM_TRAIN // BATCH_SIZE,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=NUM_VAL // BATCH_SIZE,
                    verbose=1, callbacks=[checkpoint])
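As a quick sanity check after training, the model can also be scored directly on the validation generator (model.evaluate accepts generators in TF >= 2.1); a minimal sketch:

val_loss, val_acc = model.evaluate(valid_generator, steps=NUM_VAL // BATCH_SIZE, verbose=1)
print('val loss = %.4f, val acc = %.4f' % (val_loss, val_acc))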

Test code

import os
import sys
import tensorflow as tf
import time
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda

from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint

from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.optimizers import SGD
from get_acc_fun import *  # local helper module (not shown here)
import numpy as np


print(tf.__version__)
print(sys.version_info)


HEIGHT = 160
WIDTH = 160

BATCH_SIZE = 500

SIZE = HEIGHT

NUM_TRAIN = 342155
NUM_VAL = 1981


nb_classes = 8


#model = tf.keras.applications.InceptionResNetV2(weights='imagenet',include_top=False,classes=nb_classes,input_shape=(HEIGHT, WIDTH, 3))
model = tf.keras.applications.InceptionResNetV2(weights=None,classes=nb_classes,input_shape=(HEIGHT, WIDTH, 3))

model.compile(
  optimizer=tf.keras.optimizers.RMSprop(),
  loss='categorical_crossentropy',
  metrics=['accuracy'])

model.summary()
model.load_weights("/data_1/Yang/project_new/2020/tf_study/tf_xception/Inception-ResNetV2/model/inception-resnet-model_020-0.8573.h5")
dict_label_tijiao = {"f_children":"0",
              "f_young":"1",
              "f_adult":"2",
              "f_older":"3",
              "m_children":"4",
              "m_young":"5",
              "m_adult":"6",
              "m_older":"7"
              }

own_label = {'f_adult': 0, 'f_children': 1, 'f_older': 2, 'f_young': 3, 'm_adult': 4, 'm_children': 5, 'm_older': 6, 'm_young': 7}
map_own_label2tijiao = {0:'f_adult',1:'f_children',2:'f_older',3:'f_young',4:'m_adult',5:'m_children',6:'m_older',7:'m_young'}

root_dir_test =  "/data_2/big-data/compete/20200323/src_data/test-tijiao/"
with open(root_dir_test + 'result.txt','w')as fw:
    for root, dirs, files in os.walk(root_dir_test):
        if 0 == len(files):
            continue
        for img_name_ in files:
            print(img_name_)
            if img_name_.endswith((".jpg", ".jpeg", ".png")):
                name = os.path.splitext(img_name_)[0]  # filename without extension
                img_path = os.path.join(root, img_name_)
                img = image.load_img(img_path, target_size=(SIZE, SIZE))
                img = image.img_to_array(img) / 255.0  # same rescaling as training
                img = np.expand_dims(img, axis=0)  # add the batch dimension
                predictions = model.predict(img)
                label = np.argmax(predictions, axis=1)
                label_describe = map_own_label2tijiao[label[0]]
                label_tijiao_val = dict_label_tijiao[label_describe]
                content = name + " " + label_tijiao_val
                fw.write(content + '\n')

                # print(predictions)
                # print("label=", label)
                # print('*' * 100)
                # cv2.imshow("img", img[0][:, :, ::-1])
                # cv2.waitKey(0)

EfficientNet (custom implementation)

efficientnet.py

import tensorflow as tf
import math

NUM_CLASSES = 8


def round_filters(filters, multiplier):
    # Scale the channel count by the width multiplier, snapping to a multiple of 8
    # and never reducing the result by more than 10%.
    depth_divisor = 8
    min_depth = depth_divisor
    filters = filters * multiplier
    new_filters = max(min_depth, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_filters < 0.9 * filters:
        new_filters += depth_divisor
    return int(new_filters)


def round_repeats(repeats, multiplier):
    # Scale the per-stage layer count by the depth multiplier, rounding up.
    if not multiplier:
        return repeats
    return int(math.ceil(multiplier * repeats))
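A couple of worked values for these two helpers (the multipliers are the b2/b3 coefficients defined at the bottom of this file):

print(round_filters(32, 1.1))  # 32 * 1.1 = 35.2 -> snapped to 32 (within the 10% tolerance)
print(round_filters(40, 1.2))  # 40 * 1.2 = 48.0 -> already a multiple of 8 -> 48
print(round_repeats(2, 1.2))   # ceil(2 * 1.2) -> 3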


class SEBlock(tf.keras.layers.Layer):
    # Squeeze-and-excitation: global-pool to per-channel statistics, bottleneck
    # through two 1x1 convs, then rescale the input channel-wise.
    def __init__(self, input_channels, ratio=0.25):
        super(SEBlock, self).__init__()
        self.num_reduced_filters = max(1, int(input_channels * ratio))
        self.pool = tf.keras.layers.GlobalAveragePooling2D()
        self.reduce_conv = tf.keras.layers.Conv2D(filters=self.num_reduced_filters,
                                                  kernel_size=(1, 1),
                                                  strides=1,
                                                  padding="same")
        self.expand_conv = tf.keras.layers.Conv2D(filters=input_channels,
                                                  kernel_size=(1, 1),
                                                  strides=1,
                                                  padding="same")

    def call(self, inputs, **kwargs):
        branch = self.pool(inputs)
        branch = tf.expand_dims(input=branch, axis=1)
        branch = tf.expand_dims(input=branch, axis=1)
        branch = self.reduce_conv(branch)
        branch = tf.nn.swish(branch)
        branch = self.expand_conv(branch)
        branch = tf.nn.sigmoid(branch)
        output = inputs * branch
        return output


class MBConv(tf.keras.layers.Layer):
    # Mobile inverted bottleneck (expand -> depthwise conv -> SE -> project),
    # with drop-connect (dropout) on the transformed branch before the residual add.
    def __init__(self, in_channels, out_channels, expansion_factor, stride, k, drop_connect_rate):
        super(MBConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.drop_connect_rate = drop_connect_rate
        self.conv1 = tf.keras.layers.Conv2D(filters=in_channels * expansion_factor,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same",
                                            use_bias=False)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.dwconv = tf.keras.layers.DepthwiseConv2D(kernel_size=(k, k),
                                                      strides=stride,
                                                      padding="same",
                                                      use_bias=False)
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.se = SEBlock(input_channels=in_channels * expansion_factor)
        self.conv2 = tf.keras.layers.Conv2D(filters=out_channels,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same",
                                            use_bias=False)
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.dropout = tf.keras.layers.Dropout(rate=drop_connect_rate)

    def call(self, inputs, training=None, **kwargs):
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.swish(x)
        x = self.dwconv(x)
        x = self.bn2(x, training=training)
        x = self.se(x)
        x = tf.nn.swish(x)
        x = self.conv2(x)
        x = self.bn3(x, training=training)
        # residual connection only when spatial size and channel count are preserved
        if self.stride == 1 and self.in_channels == self.out_channels:
            if self.drop_connect_rate:
                x = self.dropout(x, training=training)
            x = tf.keras.layers.add([x, inputs])
        return x


def build_mbconv_block(in_channels, out_channels, layers, stride, expansion_factor, k, drop_connect_rate):
    block = tf.keras.Sequential()
    for i in range(layers):
        if i == 0:
            block.add(MBConv(in_channels=in_channels,
                             out_channels=out_channels,
                             expansion_factor=expansion_factor,
                             stride=stride,
                             k=k,
                             drop_connect_rate=drop_connect_rate))
        else:
            block.add(MBConv(in_channels=out_channels,
                             out_channels=out_channels,
                             expansion_factor=expansion_factor,
                             stride=1,
                             k=k,
                             drop_connect_rate=drop_connect_rate))
    return block


class EfficientNet(tf.keras.Model):
    def __init__(self, width_coefficient, depth_coefficient, dropout_rate, drop_connect_rate=0.2):
        super(EfficientNet, self).__init__()

        self.conv1 = tf.keras.layers.Conv2D(filters=round_filters(32, width_coefficient),
                                            kernel_size=(3, 3),
                                            strides=2,
                                            padding="same",
                                            use_bias=False)
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.block1 = build_mbconv_block(in_channels=round_filters(32, width_coefficient),
                                         out_channels=round_filters(16, width_coefficient),
                                         layers=round_repeats(1, depth_coefficient),
                                         stride=1,
                                         expansion_factor=1, k=3, drop_connect_rate=drop_connect_rate)
        self.block2 = build_mbconv_block(in_channels=round_filters(16, width_coefficient),
                                         out_channels=round_filters(24, width_coefficient),
                                         layers=round_repeats(2, depth_coefficient),
                                         stride=2,
                                         expansion_factor=6, k=3, drop_connect_rate=drop_connect_rate)
        self.block3 = build_mbconv_block(in_channels=round_filters(24, width_coefficient),
                                         out_channels=round_filters(40, width_coefficient),
                                         layers=round_repeats(2, depth_coefficient),
                                         stride=2,
                                         expansion_factor=6, k=5, drop_connect_rate=drop_connect_rate)
        self.block4 = build_mbconv_block(in_channels=round_filters(40, width_coefficient),
                                         out_channels=round_filters(80, width_coefficient),
                                         layers=round_repeats(3, depth_coefficient),
                                         stride=2,
                                         expansion_factor=6, k=3, drop_connect_rate=drop_connect_rate)
        self.block5 = build_mbconv_block(in_channels=round_filters(80, width_coefficient),
                                         out_channels=round_filters(112, width_coefficient),
                                         layers=round_repeats(3, depth_coefficient),
                                         stride=1,
                                         expansion_factor=6, k=5, drop_connect_rate=drop_connect_rate)
        self.block6 = build_mbconv_block(in_channels=round_filters(112, width_coefficient),
                                         out_channels=round_filters(192, width_coefficient),
                                         layers=round_repeats(4, depth_coefficient),
                                         stride=2,
                                         expansion_factor=6, k=5, drop_connect_rate=drop_connect_rate)
        self.block7 = build_mbconv_block(in_channels=round_filters(192, width_coefficient),
                                         out_channels=round_filters(320, width_coefficient),
                                         layers=round_repeats(1, depth_coefficient),
                                         stride=1,
                                         expansion_factor=6, k=3, drop_connect_rate=drop_connect_rate)

        self.conv2 = tf.keras.layers.Conv2D(filters=round_filters(1280, width_coefficient),
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same",
                                            use_bias=False)
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.pool = tf.keras.layers.GlobalAveragePooling2D()
        self.dropout = tf.keras.layers.Dropout(rate=dropout_rate)
        self.fc = tf.keras.layers.Dense(units=NUM_CLASSES,
                                        activation=tf.keras.activations.softmax)

    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.swish(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)

        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = tf.nn.swish(x)
        x = self.pool(x)
        x = self.dropout(x, training=training)
        x = self.fc(x)

        return x


def get_efficient_net(width_coefficient, depth_coefficient, resolution, dropout_rate):
    # NOTE: `resolution` records each variant's native input size but is not used
    # here; the caller chooses the actual input shape via model.build.
    net = EfficientNet(width_coefficient=width_coefficient,
                       depth_coefficient=depth_coefficient,
                       dropout_rate=dropout_rate)

    return net


def efficient_net_b0():
    return get_efficient_net(1.0, 1.0, 224, 0.2)


def efficient_net_b1():
    return get_efficient_net(1.0, 1.1, 240, 0.2)


def efficient_net_b2():
    return get_efficient_net(1.1, 1.2, 260, 0.3)


def efficient_net_b3():
    return get_efficient_net(1.2, 1.4, 300, 0.3)


def efficient_net_b4():
    return get_efficient_net(1.4, 1.8, 380, 0.4)


def efficient_net_b5():
    return get_efficient_net(1.6, 2.2, 456, 0.4)


def efficient_net_b6():
    return get_efficient_net(1.8, 2.6, 528, 0.5)


def efficient_net_b7():
    return get_efficient_net(2.0, 3.1, 600, 0.5)

efficientnet_train.py

import os,sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from PIL import Image
import random

from tensorflow.keras.callbacks import ModelCheckpoint

import efficientnet



print(tf.__version__)
print(sys.version_info)

HEIGHT = 224
WIDTH = 224

BATCH_SIZE = 20

SIZE = HEIGHT

NUM_TRAIN = 17786
NUM_VAL = 1981

CHANNELS = 3
IMAGE_HEIGHT = HEIGHT
IMAGE_WIDTH = WIDTH




model = efficientnet.efficient_net_b0()
model.build(input_shape=(None, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS))
model.summary()

optimizer_rmsp = tf.keras.optimizers.RMSprop()

model.compile(optimizer=optimizer_rmsp,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

datagen_train = ImageDataGenerator(
        rescale=1./255.0,
        rotation_range=1.5,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')

datagen_val = ImageDataGenerator(
        rescale=1./255.0)


train_generator=datagen_train.flow_from_directory('/data_2/big-data/compete/20200323/src_data/age_gender1/train', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

valid_generator=datagen_val.flow_from_directory('/data_2/big-data/compete/20200323/src_data/age_gender1/test', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

print(train_generator.class_indices)
print(valid_generator.class_indices)


epochs = 10000
filepath = "./model/efficient-keras_model_{epoch:03d}-{val_acc:.4f}.h5" #避免文件名称重复
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, mode='max')
history = model.fit(train_generator,  # model.fit accepts generators in TF >= 2.1; fit_generator is deprecated
                    steps_per_epoch=NUM_TRAIN // BATCH_SIZE,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=NUM_VAL // BATCH_SIZE,
                    verbose=1, callbacks=[checkpoint])

efficientnet_test.py

import os,sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from PIL import Image
import random

from tensorflow.keras.callbacks import ModelCheckpoint

import efficientnet




print(tf.__version__)
print(sys.version_info)

HEIGHT = 224
WIDTH = 224

BATCH_SIZE = 20

SIZE = HEIGHT

NUM_TRAIN = 17786
NUM_VAL = 1981

CHANNELS = 3
IMAGE_HEIGHT = HEIGHT
IMAGE_WIDTH = WIDTH




model = efficientnet.efficient_net_b0()
model.build(input_shape=(None, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS))
model.summary()

optimizer_rmsp = tf.keras.optimizers.RMSprop()

model.compile(optimizer=optimizer_rmsp,
              loss='categorical_crossentropy',
              metrics=['accuracy'])




model.load_weights("/data_1/Yang/project_new/2020/tf_study/tf_xception/efficientnet/model/efficient-keras_model_001-0.3758.h5")


dict_label_tijiao = {"f_children":"0",
              "f_young":"1",
              "f_adult":"2",
              "f_older":"3",
              "m_children":"4",
              "m_young":"5",
              "m_adult":"6",
              "m_older":"7"
              }

own_label = {'f_adult': 0, 'f_children': 1, 'f_older': 2, 'f_young': 3, 'm_adult': 4, 'm_children': 5, 'm_older': 6, 'm_young': 7}
map_own_label2tijiao = {0:'f_adult',1:'f_children',2:'f_older',3:'f_young',4:'m_adult',5:'m_children',6:'m_older',7:'m_young'}



root_dir_test =  "/data_2/big-data/compete/20200323/src_data/test-tijiao/"
with open(root_dir_test + 'result.txt','w')as fw:
    for root, dirs, files in os.walk(root_dir_test):
        if 0 == len(files):
            continue
        for img_name_ in files:
            print(img_name_)
            if img_name_.endswith((".jpg", ".jpeg", ".png")):
                name = os.path.splitext(img_name_)[0]  # filename without extension
                img_path = os.path.join(root, img_name_)
                img = image.load_img(img_path, target_size=(SIZE, SIZE))
                img = image.img_to_array(img) / 255.0  # same rescaling as training
                img = np.expand_dims(img, axis=0)  # add the batch dimension
                predictions = model.predict(img)
                label = np.argmax(predictions, axis=1)
                label_describe = map_own_label2tijiao[label[0]]
                label_tijiao_val = dict_label_tijiao[label_describe]
                content = name + " " + label_tijiao_val
                fw.write(content + '\n')

                # print(predictions)
                # print("label=", label)
                # print('*' * 100)
                # cv2.imshow("img", img[0][:, :, ::-1])
                # cv2.waitKey(0)

InceptionV3

import os
import sys
import tensorflow as tf
import time
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda

from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint

from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.optimizers import SGD


print(tf.__version__)
print(sys.version_info)

HEIGHT = 160
WIDTH = 160

BATCH_SIZE = 20

SIZE = HEIGHT

NUM_TRAIN = 17786
NUM_VAL = 1981


# Data preparation
IM_WIDTH, IM_HEIGHT = 160, 160  # input size used here (with include_top=False, InceptionV3 accepts any size >= 75x75)
FC_SIZE = 1024  # number of units in the new fully connected layer
NB_IV3_LAYERS_TO_FREEZE = 172  # number of layers to freeze for fine-tuning
nb_classes = 8


# Attach a new classification head
def add_new_last_layer(base_model, nb_classes):
    """
    Attach the final layers.
    Input: base_model and the number of classes.
    Output: a new Keras model.
    """
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(FC_SIZE, activation='relu')(x)  # new FC layer, random init
    predictions = Dense(nb_classes, activation='softmax')(x)  # new softmax layer
    model = tf.keras.models.Model(inputs=base_model.input, outputs=predictions)
    return model


# Freeze the layers below NB_IV3_LAYERS_TO_FREEZE
def setup_to_finetune(model):
    """Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.
    note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch
    Args:
      model: keras model
    """
    for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
        layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(learning_rate=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])  # 'lr' is the deprecated alias


# Build the network
model = InceptionV3(weights='imagenet', include_top=False,input_shape=(HEIGHT, WIDTH, 3))
model = add_new_last_layer(model, nb_classes)
setup_to_finetune(model)
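A quick sanity check, as a sketch, that the freeze took effect (layer.trainable is a standard Keras layer attribute):

n_frozen = sum(1 for layer in model.layers if not layer.trainable)
print('frozen layers: %d / %d' % (n_frozen, len(model.layers)))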

model.summary()

datagen_train = ImageDataGenerator(
        rescale=1./255.0,
        rotation_range=1.5,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')

datagen_val = ImageDataGenerator(
        rescale=1./255.0)


train_generator=datagen_train.flow_from_directory('/data_2/train', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

valid_generator=datagen_val.flow_from_directory('/data_2/test', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

print(train_generator.class_indices)
print(valid_generator.class_indices)


epochs = 10000
filepath = "./model/inception-keras_model_{epoch:03d}-{val_acc:.4f}.h5" #避免文件名称重复
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, mode='max')
history = model.fit(train_generator,  # model.fit accepts generators in TF >= 2.1; fit_generator is deprecated
                    steps_per_epoch=NUM_TRAIN // BATCH_SIZE,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=NUM_VAL // BATCH_SIZE,
                    verbose=1, callbacks=[checkpoint])

Xception. Note that here a custom fully connected head is attached on top of the official model; this requires include_top=False so the official classifier head is dropped: tf.keras.applications.Xception(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))

import os
import sys
import tensorflow as tf
import time
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, Reshape, Dense, multiply, Permute, Concatenate, Conv2D, Add, Activation, Lambda

from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint



print(tf.__version__)
print(sys.version_info)

HEIGHT = 128
WIDTH = 128

BATCH_SIZE = 20

SIZE = HEIGHT

NUM_TRAIN = 17786
NUM_VAL = 1981



xception = tf.keras.applications.Xception(weights='imagenet', include_top=False, input_shape=(HEIGHT, WIDTH, 3))
# xception.trainable = False   # freeze the backbone and reuse the pretrained weights as-is
inputs = tf.keras.layers.Input(shape=(HEIGHT, WIDTH, 3))
x = xception(inputs)

x = tf.keras.layers.GlobalAveragePooling2D()(x)

x1 = tf.keras.layers.Dense(512, activation='relu')(x)
x1 = tf.keras.layers.Dense(256, activation='relu')(x1)

out_x = tf.keras.layers.Dense(8, activation='softmax', name='out_x')(x1)  # softmax added: the loss below is categorical_crossentropy, not from_logits


predictions = out_x

model = tf.keras.models.Model(inputs=inputs, outputs=predictions,name='xception-keras')
print(model.summary())

model.compile(loss="categorical_crossentropy",
                           optimizer="adam", metrics=['accuracy'])



datagen_train = ImageDataGenerator(
        rescale=1./255.0,
        rotation_range=2,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True,
        fill_mode='nearest')

datagen_val = ImageDataGenerator(
        rescale=1./255.0)


train_generator=datagen_train.flow_from_directory('/data_2/train', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=True,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

valid_generator=datagen_val.flow_from_directory('/data_2/test', # parent directory whose subfolders are the classes
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                        target_size=[SIZE, SIZE],
                                        class_mode='categorical'
                                      )

print(train_generator.class_indices)
print(valid_generator.class_indices)


epochs = 10000
filepath = "./model/0keras_model_{epoch:03d}-{val_acc:.4f}.h5" #避免文件名称重复
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, mode='max')
history = model.fit(train_generator,  # model.fit accepts generators in TF >= 2.1; fit_generator is deprecated
                    steps_per_epoch=NUM_TRAIN // BATCH_SIZE,
                    epochs=epochs,
                    validation_data=valid_generator,
                    validation_steps=NUM_VAL // BATCH_SIZE,
                    verbose=1, callbacks=[checkpoint])

For other networks, refer to the official Keras applications, or search GitHub for "Basic_CNNs_TensorFlow2-master".
