import warnings
# Suppress warning output
warnings.filterwarnings('ignore')
import os
import matplotlib
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
K.set_image_data_format('channels_last')  # ensure the backend uses channels-last image layout
from keras_py.utils import get_random_data
from keras_py.face_rec import mask_rec
from keras_py.face_rec import face_rec
from keras_py.mobileNet import MobileNet
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Dataset path
basic_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/"
def letterbox_image(image, size):  # Resize an image to `size` (width, height) and return it
    new_image = cv.resize(image, size, interpolation=cv.INTER_AREA)
    return new_image
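# Note: despite its name, letterbox_image above simply stretches the image.
# A true letterbox keeps the aspect ratio and pads the borders; a minimal
# sketch (letterbox_image_padded is our own name, not part of the original):
def letterbox_image_padded(image, size):
    target_w, target_h = size
    h, w = image.shape[:2]
    scale = min(target_w / w, target_h / h)  # scale so the image fits inside the target
    new_w, new_h = int(w * scale), int(h * scale)
    resized = cv.resize(image, (new_w, new_h), interpolation=cv.INTER_AREA)
    canvas = np.full((target_h, target_w, 3), 128, dtype=np.uint8)  # gray padding
    top, left = (target_h - new_h) // 2, (target_w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w] = resized
    return canvas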
read_img = cv.imread("test1.jpg")
print("Image shape before resizing:", read_img.shape)
read_img = letterbox_image(image=read_img, size=(50, 50))
print("Image shape after resizing:", read_img.shape)
def processing_data(data_path, height, width, batch_size=32, test_split=0.1):  # Build the data generators; batch_size defaults to 32
    train_data = ImageDataGenerator(
        # Multiply every pixel value by this rescaling factor, mapping
        # values into [0, 1], which helps the model converge
        rescale=1. / 255,
        # Float: shear intensity (counterclockwise shear angle)
        shear_range=0.1,
        # Random zoom range; a float f is equivalent to [lower, upper] = [1 - f, 1 + f]
        zoom_range=0.1,
        # Float: horizontal shift during augmentation, as a fraction of image width
        width_shift_range=0.1,
        # Float: vertical shift during augmentation, as a fraction of image height
        height_shift_range=0.1,
        # Boolean: randomly flip images horizontally
        horizontal_flip=True,
        # Boolean: randomly flip images vertically
        vertical_flip=True,
        # Float between 0 and 1: fraction of the data reserved for validation
        validation_split=test_split
    )
    # The test-set generator is built the same way as the training one
    test_data = ImageDataGenerator(
        rescale=1. / 255,
        validation_split=test_split)
    train_generator = train_data.flow_from_directory(
        # The provided path must contain one subdirectory per class
        data_path,
        # Integer tuple (height, width), default (256, 256); all images are resized to it
        target_size=(height, width),
        # Number of samples per batch
        batch_size=batch_size,
        # One of "categorical", "binary", "sparse", "input" or None.
        # Default "categorical": returns one-hot encoded labels.
        class_mode='categorical',
        # Data subset ("training" or "validation")
        subset='training',
        seed=0)
    test_generator = test_data.flow_from_directory(
        data_path,
        target_size=(height, width),
        batch_size=batch_size,
        class_mode='categorical',
        subset='validation',
        seed=0)
    return train_generator, test_generator
# Data path
data_path = basic_path + 'image'
# The number of rows and columns of image data
height, width = 160, 160
# Obtain the training and validation data generators
train_generator, test_generator = processing_data(data_path, height, width)
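# Sanity check (a sketch): draw one batch and verify shapes and pixel range.
batch_images, batch_labels = next(train_generator)
print("batch images:", batch_images.shape)  # (batch_size, height, width, 3)
print("batch labels:", batch_labels.shape)  # (batch_size, 2), one-hot encoded
print("pixel range:", batch_images.min(), "-", batch_images.max())  # within [0, 1] after rescale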
# The class_indices attribute gives the dictionary mapping folder names to class indices.
labels = train_generator.class_indices
print(labels)
# Invert it into a dictionary mapping class indices to folder names
labels = dict((v, k) for k, v in labels.items())
print(labels)
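# Example: the inverted dictionary is what turns a predicted class index
# back into a readable label name.
for index, name in labels.items():
    print(f"class {index} -> {name}")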
pnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/pnet.h5"
rnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/rnet.h5"
onet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/onet.h5"
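# pnet/rnet/onet correspond to the three cascaded stages of an MTCNN-style
# face detector, presumably consumed by keras_py.face_rec.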
# Load the pretrained MobileNet weights
weights_path = basic_path + 'keras_model_data/mobilenet_1_0_224_tf_no_top.h5'
# The number of rows and columns of image data
height, width = 160, 160
model = MobileNet(input_shape=[height, width, 3], classes=2)
model.load_weights(weights_path, by_name=True)
print('Loading complete...')
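# Sanity check (a sketch): a dummy forward pass to confirm the network
# accepts (height, width, 3) input and produces two class scores.
dummy = np.random.rand(1, height, width, 3).astype(np.float32)
print("output shape:", model.predict(dummy).shape)  # expected: (1, 2)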
def save_model(model, checkpoint_save_path, model_dir):  # Build the checkpoint callback, resuming from saved weights if present
    if os.path.exists(checkpoint_save_path):
        print("Loading model weights")
        model.load_weights(checkpoint_save_path)
        print("Model weights loaded")
    checkpoint_period = ModelCheckpoint(
        # Path pattern for saved models
        model_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        # Metric to monitor
        monitor='val_accuracy',
        # One of 'auto', 'min', 'max'
        mode='max',
        # Whether to store only the model weights
        save_weights_only=False,
        # Whether to keep only the best model
        save_best_only=True,
        # Save every 2 epochs (note: `period` is deprecated in newer tf.keras in favour of `save_freq`)
        period=2
    )
    return checkpoint_period
checkpoint_save_path = "./results/last_one88.h5"
model_dir = "./results/"
checkpoint_period = save_model(model, checkpoint_save_path, model_dir)
# Learning-rate schedule: if accuracy has not improved for 3 epochs, halve the learning rate and keep training
reduce_lr = ReduceLROnPlateau(
    monitor='accuracy',  # Metric to monitor
    factor=0.5,          # Factor by which the learning rate is reduced when the metric plateaus
    patience=3,          # Number of epochs with no improvement before reducing
    verbose=2            # Verbosity mode
)
early_stopping = EarlyStopping(
    monitor='val_accuracy',  # Metric to monitor
    min_delta=0.0001,        # Minimum change that counts as an improvement
    patience=3,              # Number of epochs with no improvement before stopping
    verbose=1                # Verbosity mode
)
# Batch size for training
batch_size = 64
# Image data path
data_path = basic_path + 'image'
# Rebuild the generators with this batch size
train_generator, test_generator = processing_data(data_path, height=160, width=160, batch_size=batch_size, test_split=0.1)
# Compile the model
model.compile(loss='binary_crossentropy',          # Two-class loss function
              optimizer=Adam(learning_rate=0.001), # Optimizer (`lr` is deprecated in newer tf.keras)
              metrics=['accuracy'])                # Evaluation metric
# Train the model
history = model.fit(train_generator,
                    # epochs: integer, total number of passes over the data
                    epochs=20,
                    # Steps per epoch, normally the number of samples divided by the batch size
                    steps_per_epoch=637 // batch_size,
                    validation_data=test_generator,
                    validation_steps=70 // batch_size,
                    # Integer: epoch at which to start training (useful for resuming)
                    initial_epoch=0,
                    callbacks=[checkpoint_period, reduce_lr, early_stopping])
# Save the final weights
model.save_weights(model_dir + 'temp.h5')
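# Evaluation (a sketch): report loss and accuracy on the validation split.
val_loss, val_acc = model.evaluate(test_generator, steps=70 // batch_size)
print("val_loss:", val_loss, "val_acc:", val_acc)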
# Plot training and validation loss
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], 'r', label='val_loss')
plt.legend()
plt.show()
# Plot training and validation accuracy
plt.plot(history.history['accuracy'], label='acc')
plt.plot(history.history['val_accuracy'], 'r', label='val_acc')
plt.legend()
plt.show()
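# Inference sketch on a single image, mirroring the training preprocessing
# (resize to 160x160, rescale to [0, 1]). The BGR->RGB conversion is an
# assumption to match the RGB images the generators produced; "test1.jpg"
# is the sample image loaded earlier.
img = cv.cvtColor(cv.imread("test1.jpg"), cv.COLOR_BGR2RGB)
img = letterbox_image(image=img, size=(width, height)) / 255.0
pred = model.predict(np.expand_dims(img.astype(np.float32), axis=0))
print("predicted label:", labels[int(np.argmax(pred))])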