import warnings
# Suppress warnings
warnings.filterwarnings('ignore')
import os
import matplotlib
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
K.set_image_data_format('channels_last')  # use the channels-last image layout
from keras_py.utils import get_random_data
from keras_py.face_rec import mask_rec
from keras_py.face_rec import face_rec
from keras_py.mobileNet import MobileNet
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Dataset root path
basic_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/"
def letterbox_image(image, size):  # Resize the image and return the adjusted copy
    new_image = cv.resize(image, size, interpolation=cv.INTER_AREA)
    return new_image

read_img = cv.imread("test1.jpg")
print("Image size before resizing:", read_img.shape)
read_img = letterbox_image(image=read_img, size=(50, 50))
print("Image size after resizing:", read_img.shape)
def processing_data(data_path, height, width, batch_size=32, test_split=0.1):  # Data processing; batch_size defaults to 32
    train_data = ImageDataGenerator(
        # Multiply every pixel by this rescaling factor to map values into [0, 1], which helps the model converge
        rescale=1. / 255,
        # Float: shear intensity (shear angle in the counter-clockwise direction)
        shear_range=0.1,
        # Random zoom range; a float is treated as [lower, upper] = [1 - zoom_range, 1 + zoom_range]
        zoom_range=0.1,
        # Float: fraction of the image width for random horizontal shifts during augmentation
        width_shift_range=0.1,
        # Float: fraction of the image height for random vertical shifts during augmentation
        height_shift_range=0.1,
        # Boolean: randomly flip images horizontally
        horizontal_flip=True,
        # Boolean: randomly flip images vertically
        vertical_flip=True,
        # Float in [0, 1]: fraction of the training data reserved for validation
        validation_split=test_split
    )
    # The validation generator is built the same way as the training one
    test_data = ImageDataGenerator(
        rescale=1. / 255,
        validation_split=test_split)
    train_generator = train_data.flow_from_directory(
        # The given path must contain one subdirectory per class
        data_path,
        # Integer tuple (height, width), default (256, 256); every image is resized to this shape
        target_size=(height, width),
        # Number of samples per batch
        batch_size=batch_size,
        # One of "categorical", "binary", "sparse", "input", or None.
        # Default "categorical": returns one-hot encoded labels.
        class_mode='categorical',
        # Data subset ("training" or "validation")
        subset='training',
        seed=0)
    test_generator = test_data.flow_from_directory(
        data_path,
        target_size=(height, width),
        batch_size=batch_size,
        class_mode='categorical',
        subset='validation',
        seed=0)
    return train_generator, test_generator
# Data path
data_path = basic_path + 'image'
# Image height and width
height, width = 160, 160
# Build the training and validation generators
train_generator, test_generator = processing_data(data_path, height, width)
# The class_indices attribute maps folder names to class indices
labels = train_generator.class_indices
print(labels)
# Invert the mapping so class indices map back to folder names
labels = dict((v, k) for k, v in labels.items())
print(labels)
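# A quick sanity check (sketch, not part of the original pipeline): pull one
# batch from the training generator and display the first few images with
# their decoded class names.
x_batch, y_batch = next(train_generator)
plt.figure(figsize=(8, 2))
for i in range(min(4, len(x_batch))):
    plt.subplot(1, 4, i + 1)
    plt.imshow(x_batch[i])
    plt.title(labels[int(np.argmax(y_batch[i]))])
    plt.axis('off')
plt.show()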
# Weight paths for the three MTCNN stages (P-Net, R-Net, O-Net) used by the keras_py face detector
pnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/pnet.h5"
rnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/rnet.h5"
onet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/onet.h5"
# Load the pretrained MobileNet weights
weights_path = basic_path + 'keras_model_data/mobilenet_1_0_224_tf_no_top.h5'
# Image height and width
height, width = 160, 160
model = MobileNet(input_shape=[height, width, 3], classes=2)
model.load_weights(weights_path, by_name=True)
print('Weights loaded...')
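# Optional (sketch): print the network structure after loading the backbone
# weights to confirm the input shape and the two-class head.
model.summary()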
def save_model(model, checkpoint_save_path, model_dir):  # Build the checkpoint callback, resuming from an existing checkpoint if present
    if os.path.exists(checkpoint_save_path):
        print("Loading model")
        model.load_weights(checkpoint_save_path)
        print("Model loaded")
    checkpoint_period = ModelCheckpoint(
        # Where to store the checkpoints
        model_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        # Metric to monitor (with metrics=['accuracy'], the logged key is 'val_accuracy')
        monitor='val_accuracy',
        # One of 'auto', 'min', 'max'
        mode='max',
        # Whether to save only the model weights
        save_weights_only=False,
        # Whether to keep only the best model
        save_best_only=True,
        # Check every 2 epochs
        period=2
    )
    return checkpoint_period
checkpoint_save_path = "./results/last_one88.h5"
model_dir = "./results/"
checkpoint_period = save_model(model, checkpoint_save_path, model_dir)
# Learning-rate schedule: halve the learning rate if accuracy has not improved for three epochs
reduce_lr = ReduceLROnPlateau(
    monitor='accuracy',  # metric to monitor
    factor=0.5,  # factor by which to reduce the learning rate on a plateau
    patience=3,  # number of epochs with no improvement before reducing
    verbose=2  # verbosity mode
)
early_stopping = EarlyStopping(
    monitor='val_accuracy',  # metric to monitor
    min_delta=0.0001,  # minimum change that counts as an improvement
    patience=3,  # number of epochs with no improvement before stopping
    verbose=1  # verbosity mode
)
# Training batch size
batch_size = 64
# Image data path
data_path = basic_path + 'image'
# Rebuild the generators with the new batch size
train_generator, test_generator = processing_data(data_path, height=160, width=160, batch_size=batch_size, test_split=0.1)
# Compile the model
model.compile(loss='binary_crossentropy',  # binary cross-entropy loss
              optimizer=Adam(learning_rate=0.001),  # optimizer (learning_rate replaces the deprecated lr argument)
              metrics=['accuracy'])  # evaluation metric
# Train the model
history = model.fit(train_generator,
                    epochs=20,  # total number of training epochs
                    # Steps per epoch, normally the sample count divided by the batch size
                    steps_per_epoch=637 // batch_size,
                    validation_data=test_generator,
                    validation_steps=70 // batch_size,
                    initial_epoch=0,  # epoch at which to start (useful for resuming an earlier run)
                    callbacks=[checkpoint_period, reduce_lr, early_stopping])
# Save the weights
model.save_weights(model_dir + 'temp.h5')
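# Optional sanity check (sketch): re-evaluate the trained model on the
# validation generator to confirm the metrics logged during training.
val_loss, val_acc = model.evaluate(test_generator, steps=70 // batch_size)
print("val_loss: %.4f, val_accuracy: %.4f" % (val_loss, val_acc))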
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], 'r', label='val_loss')
plt.legend()
plt.show()
plt.plot(history.history['accuracy'], label='acc')
plt.plot(history.history['val_accuracy'], 'r', label='val_acc')
plt.legend()
plt.show()
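# Inference sketch: classify a single image with the fine-tuned model. The
# preprocessing (BGR-to-RGB, resize, rescale to [0, 1]) mirrors what
# ImageDataGenerator applied during training; "test1.jpg" reuses the sample
# image loaded earlier and is assumed to contain a face crop.
img = cv.cvtColor(cv.imread("test1.jpg"), cv.COLOR_BGR2RGB)
img = cv.resize(img, (width, height)).astype('float32') / 255.0
pred = model.predict(img[np.newaxis, ...])
print("Predicted class:", labels[int(np.argmax(pred))])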