From Crawling to Training: Building a Custom Image Classification Model with PaddlePaddle

Preface

This chapter shows how to train PaddlePaddle on your own image dataset. Previous chapters used datasets that ship with PaddlePaddle (such as CIFAR-10); here we walk through the complete pipeline for a custom dataset, from crawling images through data cleaning, image-list creation, model definition, and data reading, all the way to training and prediction.

1. Crawling Image Data

First we need to gather our own image dataset. As an example, we write a small crawler that downloads images from Baidu Image Search.

1.1 Create download_image.py

import re
import uuid
import requests
import os
import numpy
import imghdr
from PIL import Image

# Download images from Baidu Image Search
def download_image(key_word, save_name, download_max):
    download_sum = 0
    # Store each category's images in its own folder
    save_path = 'images' + '/' + save_name
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    while download_sum < download_max:
        download_sum += 1
        str_pn = str(download_sum)
        # Baidu Image Search URL for the current result page
        url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&' \
              'word=' + key_word + '&pn=' + str_pn + '&gsm=80&ct=&ic=0&lm=-1&width=0&height=0'
        try:
            s = requests.session()
            s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
            # Fetch the HTML source of the current page
            result = s.get(url).content.decode('utf-8')
            # Extract the image URLs on the current page
            img_urls = re.findall('"objURL":"(.*?)",', result, re.S)
            if img_urls is None or len(img_urls) < 1:
                break
            # Download each image
            for img_url in img_urls:
                # Fetch the image bytes
                img = requests.get(img_url, timeout=30)
                # Save the image under a unique file name
                with open(save_path + '/' + str(uuid.uuid1()) + '.jpg', 'wb') as f:
                    f.write(img.content)
                print('Downloading image %d for "%s"' % (download_sum, key_word))
                download_sum += 1
                # Stop once the download limit is reached
                if download_sum >= download_max:
                    break
        except Exception as e:
            print(e)
            continue
    print('Download finished')

# Delete images that are neither JPEG nor PNG
def delete_error_image(father_path):
    # List every file and folder under the parent directory
    try:
        image_dirs = os.listdir(father_path)
        for image_dir in image_dirs:
            image_dir = os.path.join(father_path, image_dir)
            # If it is a folder, check the images inside it
            if os.path.isdir(image_dir):
                images = os.listdir(image_dir)
                for image in images:
                    image = os.path.join(image_dir, image)
                    try:
                        # Detect the actual image type
                        image_type = imghdr.what(image)
                        # Delete the image if it is neither JPEG nor PNG
                        if image_type != 'jpeg' and image_type != 'png':
                            os.remove(image)
                            print('Deleted: %s' % image)
                            continue
                        # Delete grayscale images
                        img = numpy.array(Image.open(image))
                        if len(img.shape) == 2:
                            os.remove(image)
                            print('Deleted: %s' % image)
                    except:
                        os.remove(image)
                        print('Deleted: %s' % image)
    except:
        pass

if __name__ == '__main__':
    # Map the Chinese search keywords to English names; the English names
    # are used as the folder names for each category
    key_words = {'西瓜': 'watermelon', '哈密瓜': 'cantaloupe',
                 '樱桃': 'cherry', '苹果': 'apple', '黄瓜': 'cucumber', '胡萝卜': 'carrot'}
    # Download up to 500 images per category
    max_sum = 500
    for key_word in key_words:
        save_name = key_words[key_word]
        download_image(key_word, save_name, max_sum)

    # Remove broken images
    delete_error_image('images/')

1.2 Run the Crawler

After running the script, a folder for each category is created under the images directory, each containing up to the specified number of images.

Note: after downloading, the folders may still contain images that do not belong to their category; remove those by hand.
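Before moving on, it helps to check how many images each category actually ended up with. A minimal sketch, assuming the images/ layout produced by the crawler above:

import os

# Print the number of files in each category folder under images/
for class_dir in sorted(os.listdir('images')):
    class_path = os.path.join('images', class_dir)
    if os.path.isdir(class_path):
        print('%s: %d images' % (class_dir, len(os.listdir(class_path))))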

2. Creating the Image Lists

For PaddlePaddle to read and process a custom image dataset, we generate image list files in a fixed format.

2.1 Create create_data_list.py

import json
import os

def create_data_list(data_root_path):
    # Truncate (or create) the two list files
    with open(data_root_path + "test.list", 'w') as f:
        pass
    with open(data_root_path + "train.list", 'w') as f:
        pass
    # Information about every class
    class_detail = []
    # All class folders
    class_dirs = os.listdir(data_root_path)
    # Class label
    class_label = 0
    # Name of the top-level dataset folder (strip any trailing slashes)
    father_path = data_root_path.rstrip('/').split('/')[-1]

    all_class_images = 0
    other_file = 0
    # Process each class
    for class_dir in class_dirs:
        if class_dir == 'test.list' or class_dir == "train.list" or class_dir == 'readme.json':
            other_file += 1
            continue
        print('Reading class: %s' % class_dir)
        # Information about this class
        class_detail_list = {}
        test_sum = 0
        trainer_sum = 0
        # Number of images in this class
        class_sum = 0
        # Path of the class folder
        path = data_root_path + "/" + class_dir
        # All images in the class folder
        img_paths = os.listdir(path)
        for img_path in img_paths:
            # Path of each image relative to the dataset root
            name_path = class_dir + '/' + img_path
            # Every 10th image goes into the test set
            if class_sum % 10 == 0:
                test_sum += 1
                with open(data_root_path + "test.list", 'a') as f:
                    f.write(name_path + "\t%d" % class_label + "\n")
            else:
                trainer_sum += 1
                with open(data_root_path + "train.list", 'a') as f:
                    f.write(name_path + "\t%d" % class_label + "\n")
            class_sum += 1
            all_class_images += 1
        # Per-class entry for the readme.json description file
        class_detail_list['class_name'] = class_dir
        class_detail_list['class_label'] = class_label
        class_detail_list['class_test_images'] = test_sum
        class_detail_list['class_trainer_images'] = trainer_sum
        class_detail.append(class_detail_list)
        class_label += 1
    # Number of classes
    all_class_sum = len(class_dirs) - other_file
    # Dataset-level information for readme.json
    readjson = {}
    readjson['all_class_name'] = father_path
    readjson['all_class_sum'] = all_class_sum
    readjson['all_class_images'] = all_class_images
    readjson['class_detail'] = class_detail
    jsons = json.dumps(readjson, sort_keys=True, indent=4, separators=(',', ': '))
    with open(data_root_path + "readme.json", 'w') as f:
        f.write(jsons)
    print('Image lists created')

if __name__ == '__main__':
    # Put the generated data lists in the top-level dataset folder
    data_root_path = "images/"
    create_data_list(data_root_path)

2.2 Run the List Generator

After running the script, three files are generated in the images directory: train.list, test.list, and readme.json, where:
- train.list: the training image list (one image per line, in the format image path\tlabel)
- test.list: the test image list
- readme.json: a description file for the dataset
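For reference, each line of train.list pairs an image path (relative to images/) with its integer class label. A couple of illustrative lines (the file names are the random UUIDs generated by the crawler, and the label order depends on os.listdir ordering, so yours will differ):

apple/6f2c1e3a-....jpg	0
cherry/9a41b7d2-....jpg	3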

3. Defining the Model

We use MobileNet V1 for image classification; it is a lightweight model that is also well suited to embedded devices.

3.1 Create mobilenet_v1.py

import paddle.fluid as fluid

# Convolution followed by batch normalization
def conv_bn_layer(input, filter_size, num_filters, stride,
                  padding, channels=None, num_groups=1, act='relu', use_cudnn=True):
    conv = fluid.layers.conv2d(input=input,
                               num_filters=num_filters,
                               filter_size=filter_size,
                               stride=stride,
                               padding=padding,
                               groups=num_groups,
                               act=None,
                               use_cudnn=use_cudnn,
                               bias_attr=False)

    return fluid.layers.batch_norm(input=conv, act=act)

# Depthwise separable convolution: a 3x3 depthwise conv followed by a 1x1 pointwise conv
def depthwise_separable(input, num_filters1, num_filters2, num_groups, stride, scale):
    depthwise_conv = conv_bn_layer(input=input,
                                   filter_size=3,
                                   num_filters=int(num_filters1 * scale),
                                   stride=stride,
                                   padding=1,
                                   num_groups=int(num_groups * scale),
                                   use_cudnn=False)

    pointwise_conv = conv_bn_layer(input=depthwise_conv,
                                   filter_size=1,
                                   num_filters=int(num_filters2 * scale),
                                   stride=1,
                                   padding=0)
    return pointwise_conv

# MobileNet V1; scale is the width multiplier
def net(input, class_dim, scale=1.0):
    # conv1: 112x112
    input = conv_bn_layer(input=input,
                          filter_size=3,
                          channels=3,
                          num_filters=int(32 * scale),
                          stride=2,
                          padding=1)

    # 56x56
    input = depthwise_separable(input=input,
                                num_filters1=32,
                                num_filters2=64,
                                num_groups=32,
                                stride=1,
                                scale=scale)

    input = depthwise_separable(input=input,
                                num_filters1=64,
                                num_filters2=128,
                                num_groups=64,
                                stride=2,
                                scale=scale)

    # 28x28
    input = depthwise_separable(input=input,
                                num_filters1=128,
                                num_filters2=128,
                                num_groups=128,
                                stride=1,
                                scale=scale)

    input = depthwise_separable(input=input,
                                num_filters1=128,
                                num_filters2=256,
                                num_groups=128,
                                stride=2,
                                scale=scale)

    # 14x14
    input = depthwise_separable(input=input,
                                num_filters1=256,
                                num_filters2=256,
                                num_groups=256,
                                stride=1,
                                scale=scale)

    input = depthwise_separable(input=input,
                                num_filters1=256,
                                num_filters2=512,
                                num_groups=256,
                                stride=2,
                                scale=scale)

    # 14x14
    for i in range(5):
        input = depthwise_separable(input=input,
                                    num_filters1=512,
                                    num_filters2=512,
                                    num_groups=512,
                                    stride=1,
                                    scale=scale)
    # 7x7
    input = depthwise_separable(input=input,
                                num_filters1=512,
                                num_filters2=1024,
                                num_groups=512,
                                stride=2,
                                scale=scale)

    input = depthwise_separable(input=input,
                                num_filters1=1024,
                                num_filters2=1024,
                                num_groups=1024,
                                stride=1,
                                scale=scale)

    feature = fluid.layers.pool2d(input=input,
                                  pool_size=0,
                                  pool_stride=1,
                                  pool_type='avg',
                                  global_pooling=True)

    net = fluid.layers.fc(input=feature,
                          size=class_dim,
                          act='softmax')
    return net
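A quick sanity check of the network definition, run on the default fluid program. This is a minimal sketch; class_dim=6 matches our six fruit-and-vegetable categories:

import paddle.fluid as fluid
import mobilenet_v1

# Build the network once to inspect the output shape
image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
out = mobilenet_v1.net(image, class_dim=6)
# The first dimension is the batch size (-1 means variable)
print(out.shape)  # expected: (-1, 6)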

4. Defining the Data Reader

We create data readers that handle image preprocessing and data augmentation.

4.1 Create reader.py

import os
import random
import numpy as np
import paddle
from PIL import Image

# Preprocessing for training images
def train_mapper(sample):
    img_path, label, crop_size, resize_size = sample
    try:
        img = Image.open(img_path)
        # Resize to a common size
        img = img.resize((resize_size, resize_size), Image.ANTIALIAS)
        # Random horizontal flip
        if random.random() > 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
        # Random small rotation
        img = img.rotate(random.randint(-3, 3), expand=False)
        # Random crop
        r4 = random.randint(0, int(resize_size - crop_size))
        r5 = random.randint(0, int(resize_size - crop_size))
        img = img.crop((r4, r5, r4 + crop_size, r5 + crop_size))
        # Convert to a numpy array
        img = np.array(img).astype(np.float32)
        # Convert to CHW layout
        img = img.transpose((2, 0, 1))
        # Convert RGB to BGR and normalize to [0, 1]
        img = img[(2, 1, 0), :, :] / 255.0
        return img, int(label)
    except:
        print("%s is a broken image; delete it and regenerate the image lists" % img_path)

# Build the training reader
def train_reader(train_list_path, crop_size, resize_size):
    father_path = os.path.dirname(train_list_path)

    def reader():
        with open(train_list_path, 'r') as f:
            lines = f.readlines()
            np.random.shuffle(lines)
            for line in lines:
                img, label = line.strip().split('\t')
                img = os.path.join(father_path, img)
                yield img, label, crop_size, resize_size

    return paddle.reader.xmap_readers(train_mapper, reader, os.cpu_count(), 102400)

# Preprocessing for test images
def test_mapper(sample):
    img_path, label, crop_size = sample
    img = Image.open(img_path)
    img = img.resize((crop_size, crop_size), Image.ANTIALIAS)
    img = np.array(img).astype(np.float32)
    img = img.transpose((2, 0, 1))
    img = img[(2, 1, 0), :, :] / 255.0
    return img, int(label)

# Build the test reader
def test_reader(test_list_path, crop_size):
    father_path = os.path.dirname(test_list_path)

    def reader():
        with open(test_list_path, 'r') as f:
            lines = f.readlines()
            for line in lines:
                img, label = line.strip().split('\t')
                img = os.path.join(father_path, img)
                yield img, label, crop_size

    return paddle.reader.xmap_readers(test_mapper, reader, os.cpu_count(), 1024)
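A minimal sketch to sanity-check the reader before training, assuming the lists generated in section 2 live under images/:

import reader

train_r = reader.train_reader('images/train.list', crop_size=224, resize_size=250)
# Inspect the first few preprocessed samples
for i, (img, label) in enumerate(train_r()):
    print(img.shape, label)  # expected: (3, 224, 224) and an integer label
    if i >= 2:
        break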

5. Training the Model

We now train the model using the network and data readers defined above.

5.1 Create train.py

import os
import shutil
import mobilenet_v1
import paddle
import reader
import paddle.fluid as fluid

# Define parameters
crop_size = 224
resize_size = 250
class_dim = 6  # number of classes

# Define the input layers
image = fluid.layers.data(name='image', shape=[3, crop_size, crop_size], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# Get the MobileNet model
model = mobilenet_v1.net(image, class_dim)

# Define the loss function and accuracy
cost = fluid.layers.cross_entropy(input=model, label=label)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=model, label=label)
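The original script breaks off here. What follows is a minimal sketch of how the remaining steps typically look in the fluid API, with an optimizer, an executor, batched readers, and the training loop; the learning rate, batch size, and epoch count are illustrative choices, not the author's original values.

# Clone a separate program for evaluation before adding the optimizer
test_program = fluid.default_main_program().clone(for_test=True)

# Optimizer (the learning rate here is an illustrative choice)
optimizer = fluid.optimizer.AdamOptimizer(learning_rate=1e-3)
optimizer.minimize(avg_cost)

# Batched readers built from section 4
train_r = paddle.batch(reader.train_reader('images/train.list', crop_size, resize_size), batch_size=32)
test_r = paddle.batch(reader.test_reader('images/test.list', crop_size), batch_size=32)

# Executor (use fluid.CUDAPlace(0) instead if a GPU is available)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])

for pass_id in range(10):
    # Training
    for batch_id, data in enumerate(train_r()):
        train_cost, train_acc = exe.run(program=fluid.default_main_program(),
                                        feed=feeder.feed(data),
                                        fetch_list=[avg_cost, acc])
        if batch_id % 100 == 0:
            print('Pass %d, batch %d, cost %f, acc %f' %
                  (pass_id, batch_id, train_cost[0], train_acc[0]))
    # Evaluation
    test_accs = [exe.run(program=test_program,
                         feed=feeder.feed(data),
                         fetch_list=[acc])[0][0] for data in test_r()]
    print('Pass %d, test acc %f' % (pass_id, sum(test_accs) / len(test_accs)))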
