Mask-Wearing Detection in Python

Category: python/ruby

2022-06-24 17:20:17

import warnings
# Suppress warnings
warnings.filterwarnings('ignore')

import os
import matplotlib
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
K.image_data_format() == 'channels_last'  # no-op check; TensorFlow defaults to channels_last

from keras_py.utils import get_random_data
from keras_py.face_rec import mask_rec
from keras_py.face_rec import face_rec
from keras_py.mobilenet import MobileNet
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Dataset path
basic_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/"

def letterbox_image(image, size):  # Resize an image and return the adjusted copy
    new_image = cv.resize(image, size, interpolation=cv.INTER_AREA)
    return new_image

read_img = cv.imread("test1.jpg")
print("Image size before resizing:", read_img.shape)
read_img = letterbox_image(image=read_img, size=(50, 50))
print("Image size after resizing:", read_img.shape)

def processing_data(data_path, height, width, batch_size=32, test_split=0.1):  # Data processing; batch_size defaults to 32
    train_data = ImageDataGenerator(
            # Multiply every pixel by this rescaling factor; scaling pixel
            # values into [0, 1] helps the model converge
            rescale=1. / 255,
            # Float, shear intensity (shear angle in the counter-clockwise direction)
            shear_range=0.1,
            # Range for random zoom; a float f means [lower, upper] = [1 - f, 1 + f]
            zoom_range=0.1,
            # Float, fraction of the image width for random horizontal shifts
            width_shift_range=0.1,
            # Float, fraction of the image height for random vertical shifts
            height_shift_range=0.1,
            # Boolean, randomly flip images horizontally
            horizontal_flip=True,
            # Boolean, randomly flip images vertically
            vertical_flip=True,
            # Float in [0, 1], fraction of the training data reserved for validation
            validation_split=test_split
    )

    # The test-set generator follows the same pattern as the training one
    test_data = ImageDataGenerator(
            rescale=1. / 255,
            validation_split=test_split)

    train_generator = train_data.flow_from_directory(
            # The given path must contain one subdirectory per class
            data_path,
            # Tuple of integers (height, width), default (256, 256);
            # all images are resized to this size
            target_size=(height, width),
            # Size of one batch of data
            batch_size=batch_size,
            # One of "categorical", "binary", "sparse", "input" or None.
            # Default "categorical": returns one-hot encoded labels.
            class_mode='categorical',
            # Data subset ("training" or "validation")
            subset='training',
            seed=0)

    test_generator = test_data.flow_from_directory(
            data_path,
            target_size=(height, width),
            batch_size=batch_size,
            class_mode='categorical',
            subset='validation',
            seed=0)

    return train_generator, test_generator

# Data path
data_path = basic_path + 'image'
# Number of rows and columns of the image data
height, width = 160, 160
# Get the training and validation data generators
train_generator, test_generator = processing_data(data_path, height, width)
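To sanity-check what the generators yield, you can pull one batch; with the defaults above each training batch should be 32 images of 160x160x3 with one-hot labels (a quick check of my own, not part of the original pipeline):

x_batch, y_batch = next(train_generator)
print(x_batch.shape, y_batch.shape)  # expected: (32, 160, 160, 3) (32, 2)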

# The class_indices attribute maps each folder name to its class index
labels = train_generator.class_indices
print(labels)
# Invert it into a dict that maps each class index back to its folder name
labels = dict((v, k) for k, v in labels.items())
print(labels)
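For instance, assuming the image directory contains class folders named mask and nomask (hypothetical names; they depend on your dataset), the two prints would show:

{'mask': 0, 'nomask': 1}
{0: 'mask', 1: 'nomask'}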

# Weights for the three stages of the MTCNN face detector (P-Net, R-Net, O-Net)
pnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/pnet.h5"
rnet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/rnet.h5"
onet_path = "./datasets/5f680a696ec9b83bb0037081-momodel/data/keras_model_data/onet.h5"
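These weight files are consumed by the keras_py face-recognition code at inference time. If you want to verify they are readable before training, a small h5py check works (an optional sanity check, not part of the original script):

import h5py
for path in (pnet_path, rnet_path, onet_path):
    with h5py.File(path, "r") as f:
        print(path, "->", len(f.keys()), "top-level groups")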

# Load the pre-trained MobileNet weights
weights_path = basic_path + 'keras_model_data/mobilenet_1_0_224_tf_no_top.h5'
# Number of rows and columns of the image data
height, width = 160, 160
model = MobileNet(input_shape=[height, width, 3], classes=2)
model.load_weights(weights_path, by_name=True)
print('Weights loaded...')

def save_model(model, checkpoint_save_path, model_dir):  # Save the model
    if os.path.exists(checkpoint_save_path):
        print("Loading model")
        model.load_weights(checkpoint_save_path)
        print("Model loaded")
    checkpoint_period = ModelCheckpoint(
        # Where to store the model
        model_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        # Metric to monitor (TF 2.x logs it as val_accuracy, not val_acc)
        monitor='val_accuracy',
        # One of 'auto', 'min', 'max'
        mode='max',
        # Whether to save only the model weights
        save_weights_only=False,
        # Whether to keep only the best model
        save_best_only=True,
        # Check every 2 epochs
        period=2
    )
    return checkpoint_period

checkpoint_save_path = "./results/last_one88.h5"
model_dir = "./results/"
checkpoint_period = save_model(model, checkpoint_save_path, model_dir)

# Learning-rate schedule: if accuracy has not improved for three epochs,
# reduce the learning rate and keep training
reduce_lr = ReduceLROnPlateau(
                        monitor='accuracy',  # Metric to monitor
                        factor=0.5,          # Factor by which to reduce the learning rate
                        patience=3,          # Epochs with no improvement before reducing
                        verbose=2            # Verbosity mode
                    )
early_stopping = EarlyStopping(
                            monitor='val_accuracy',  # Metric to monitor
                            min_delta=0.0001,        # Minimum change that counts as an improvement
                            patience=3,              # Epochs with no improvement before stopping
                            verbose=1                # Verbosity mode
                        )

# Batch size for training
batch_size = 64
# Image data path
data_path = basic_path + 'image'
# Image processing
train_generator, test_generator = processing_data(data_path, height=160, width=160, batch_size=batch_size, test_split=0.1)

# Compile the model
model.compile(loss='binary_crossentropy',           # Binary classification loss
              optimizer=Adam(learning_rate=0.001),  # Optimizer
              metrics=['accuracy'])                 # Metric to optimize

# Train the model
history = model.fit(train_generator,
                    epochs=20,  # epochs: integer, total number of passes over the data
                    # Steps per epoch, usually the sample count divided by the batch size
                    steps_per_epoch=637 // batch_size,
                    validation_data=test_generator,
                    validation_steps=70 // batch_size,
                    initial_epoch=0,  # Integer, epoch at which to start (useful for resuming a previous run)
                    callbacks=[checkpoint_period, reduce_lr])
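Note that early_stopping is defined above but never passed to fit. If you want training to stop automatically once validation accuracy plateaus, extend the callback list (a small optional change to the call above):

callbacks=[checkpoint_period, reduce_lr, early_stopping]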

# Save the model weights
model.save_weights(model_dir + 'temp.h5')

plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], 'r', label='val_loss')
plt.legend()
plt.show()

plt.plot(history.history['accuracy'], label='acc')
plt.plot(history.history['val_accuracy'], 'r', label='val_acc')
plt.legend()
plt.show()
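With training done, the weights can be used to classify a single image. A minimal inference sketch, assuming the model, the labels mapping, and the test1.jpg from earlier (OpenCV loads BGR, so convert to RGB to match how the generators loaded the training data):

img = cv.imread("test1.jpg")
img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img = cv.resize(img, (160, 160), interpolation=cv.INTER_AREA) / 255.0  # same rescaling as training
pred = model.predict(np.expand_dims(img, axis=0))
print(labels[int(np.argmax(pred))])  # map the predicted index back to a folder name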
