Preface

  1. For how to create an mmdetection virtual environment in Anaconda and how to configure the project environment in PyCharm, see the note "(Linux) mmdetection environment configuration: GPU + Anaconda + PyCharm + RTX 2080 Ti".
  2. mmdetection: https://github.com/open-mmlab/mmdetection or mirrors / open-mmlab / mmdetection · GitCode
  3. For converting VOC to COCO, see the post "A record of converting a custom VOC dataset to COCO for DETR training".

Dataset Preparation

Create a data folder under the mmdetection directory. The required COCO dataset layout is shown below (the test set is optional); a small script that creates this layout follows the tree.

COCO dataset directory layout
mmdetection
├── data
     ├── coco: dataset root directory
          ├── train2017
          ├── val2017
          ├── test2017
          └── annotations: corresponding annotation folder
                    ├── instances_train2017.json
                    ├── instances_val2017.json
                    └── instances_test2017.json
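
A minimal sketch for creating this layout programmatically, assuming the script is run from the mmdetection root:

# Create the COCO-style directory layout under mmdetection/data
import os

for d in ('data/coco/train2017', 'data/coco/val2017',
          'data/coco/test2017', 'data/coco/annotations'):
    os.makedirs(d, exist_ok=True)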

VOC to COCO conversion

1. First create the empty files instances_train2017.json, instances_val2017.json, and instances_test2017.json. You can create a .txt file and then rename its extension to .json, or create them programmatically as sketched below.
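
A minimal sketch for creating the three empty annotation files, assuming the data/coco/annotations directory from the layout above already exists:

# Create the three (initially empty) COCO annotation files
import os

ann_dir = 'data/coco/annotations'  # assumed location from the layout above
for split in ('train', 'val', 'test'):
    open(os.path.join(ann_dir, 'instances_%s2017.json' % split), 'a').close()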

2. Convert the XML annotation files to JSON according to the corresponding txt list file. You need to modify PRE_DEFINE_CATEGORIES and the dataset paths; a driver that runs the conversion for all three splits is sketched after the script.

# Convert XML annotation files to a COCO JSON file according to the corresponding txt list
import sys
import os
import json
import xml.etree.ElementTree as ET

START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = {"person": 1,  "elephant": 2,  "lion": 3,  "giraffe": 4}  # change this to your own categories

def get(root, name):
    vars = root.findall(name)
    return vars


def get_and_check(root, name, length):
    vars = root.findall(name)
    if len(vars) == 0:
        raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))
    if length > 0 and len(vars) != length:
        raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' % (name, length, len(vars)))
    if length == 1:
        vars = vars[0]
    return vars


def get_filename_as_int(filename):
    try:
        filename = os.path.splitext(filename)[0]
        return int(filename)
    except ValueError:
        raise NotImplementedError('Filename %s is supposed to be an integer.' % (filename))


def convert(xml_list, xml_dir, json_file):
    list_fp = open(xml_list, 'r')
    json_dict = {"images": [], "type": "instances", "annotations": [],
                 "categories": []}
    categories = PRE_DEFINE_CATEGORIES
    bnd_id = START_BOUNDING_BOX_ID
    for line in list_fp:
        line = line.strip()
        line = line + ".xml"
        print("Processing %s" % (line))
        xml_f = os.path.join(xml_dir, line)
        tree = ET.parse(xml_f)
        root = tree.getroot()
        path = get(root, 'path')
        if len(path) == 1:
            filename = os.path.basename(path[0].text)
        elif len(path) == 0:
            filename = get_and_check(root, 'filename', 1).text
        else:
            raise NotImplementedError('%d paths found in %s' % (len(path), line))
        ## The filename must be a number
        image_id = get_filename_as_int(filename)
        size = get_and_check(root, 'size', 1)
        width = int(get_and_check(size, 'width', 1).text)
        height = int(get_and_check(size, 'height', 1).text)
        image = {'file_name': filename, 'height': height, 'width': width,
                 'id': image_id}
        json_dict['images'].append(image)
        ## Currently we do not support segmentation
        #  segmented = get_and_check(root, 'segmented', 1).text
        #  assert segmented == '0'
        for obj in get(root, 'object'):
            category = get_and_check(obj, 'name', 1).text
            if category not in categories:
                new_id = len(categories)
                categories[category] = new_id
            category_id = categories[category]
            bndbox = get_and_check(obj, 'bndbox', 1)
            xmin = int(get_and_check(bndbox, 'xmin', 1).text)
            ymin = int(get_and_check(bndbox, 'ymin', 1).text)
            xmax = int(get_and_check(bndbox, 'xmax', 1).text)
            ymax = int(get_and_check(bndbox, 'ymax', 1).text)
            assert (xmax > xmin+2)
            assert (ymax > ymin+2)
            o_width = abs(xmax - xmin)
            o_height = abs(ymax - ymin)
            ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id':
                image_id, 'bbox': [xmin, ymin, o_width, o_height],
                   'category_id': category_id, 'id': bnd_id, 'ignore': 0,
                   'segmentation': []}
            json_dict['annotations'].append(ann)
            bnd_id = bnd_id + 1

    for cate, cid in categories.items():
        cat = {'supercategory': 'none', 'id': cid, 'name': cate}
        json_dict['categories'].append(cat)
    json_fp = open(json_file, 'w')
    json_str = json.dumps(json_dict)
    json_fp.write(json_str)
    json_fp.close()
    list_fp.close()


if __name__ == '__main__':
    # xml_list: txt file listing the XML file names (one name per line, without extension)
    # xml_dir:  directory that contains the actual XML files
    # json_dir: path of the output JSON file
    xml_list = '/path/to/your/dataset/code/datasets/BIRDSAI/cocoBIRDSAI/ImageSets/Main/train.txt'
    xml_dir = '/path/to/your/dataset/code/datasets/BIRDSAI/cocoBIRDSAI/Annotations/'
    json_dir = '/path/to/your/dataset/annotations/instances_train2017.json'  # NOTE: this file must be created beforehand (see step 1)
    convert(xml_list, xml_dir, json_dir)
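
The script above only converts the train split. A minimal usage sketch, under the assumption that the same directory layout holds for all splits, for running convert() over train, val and test (adjust the paths to your own dataset):

# Hypothetical driver: convert every split with the script above
for split in ('train', 'val', 'test'):
    xml_list = '/path/to/your/dataset/ImageSets/Main/%s.txt' % split
    xml_dir = '/path/to/your/dataset/Annotations/'
    json_file = '/path/to/your/dataset/annotations/instances_%s2017.json' % split
    convert(xml_list, xml_dir, json_file)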

3. Move the jpg image files to the corresponding folders according to the txt lists. You need to create the train2017, val2017, and test2017 folders first. The script below handles the train split; repeat it for val and test.

# Move image files to the target folder according to a txt list of file names
import shutil

with open('/path/to/your/dataset/ImageSets/Main/train.txt', 'r') as f:
    number_list = [line.strip() for line in f]
print(number_list)

src_path = '/path/to/your/dataset/JPEGImages/'    # source image directory
target_path = '/path/to/your/dataset/train2017/'  # destination directory
for number in number_list:
    try:
        shutil.move(src_path + number + '.jpg', target_path + number + '.jpg')
    except FileNotFoundError:
        print('Skipping %s.jpg: not found in %s' % (number, src_path))

Dataset Training and Testing

1. If you split out a test set and want to use it, modify the following three files.

In the configs/_base_/datasets folder, change the last few lines of coco_detection.py, coco_instance.py, and coco_instance_semantic.py from
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_val2017.json',
         img_prefix=data_root + 'val2017/',
     to
     test=dict(
         type=dataset_type,
         ann_file=data_root + 'annotations/instances_test2017.json',
         img_prefix=data_root + 'test2017/',

2. If your GPU is not very powerful, reduce the training image size to speed up training.
In coco_detection.py, coco_instance.py, and coco_instance_semantic.py under configs/_base_/datasets,
change every img_scale=(1333, 800) to img_scale=(600, 400)  # adjust the size to your needs; see the excerpt below
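
For orientation, a hedged excerpt of the train pipeline in configs/_base_/datasets/coco_detection.py as it looks in mmdetection 2.x (surrounding lines may differ between versions); only the Resize entry needs to change:

# Excerpt (mmdetection 2.x style) from configs/_base_/datasets/coco_detection.py
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(600, 400), keep_ratio=True),  # was img_scale=(1333, 800)
    dict(type='RandomFlip', flip_ratio=0.5),
    # ... remaining transforms unchanged
]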

3. Modify the target classes in class_names.py and coco.py

In mmdet/core/evaluation/class_names.py:
 def coco_classes():
     return [
        'person',  'elephant',  'lion',  'giraffe'
     ]  # your own target classes

In mmdet/datasets/coco.py:
 class CocoDataset(CustomDataset):
     CLASSES = ('person',  'elephant',  'lion',  'giraffe')  # your own target classes

4. Modify the number of target classes, num_classes=

In configs/_base_/models/faster_rcnn_r50_fpn.py:
 num_classes=80 --> num_classes=4  # in mmdetection 2.x this is the number of your own classes (background is not counted); see the excerpt below
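
A hedged excerpt showing where num_classes lives in configs/_base_/models/faster_rcnn_r50_fpn.py (mmdetection 2.x layout; field names can differ in other versions):

# Excerpt (mmdetection 2.x style) from configs/_base_/models/faster_rcnn_r50_fpn.py
model = dict(
    # ...
    roi_head=dict(
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            num_classes=4,  # was 80; set to the number of your own classes
            # ... other fields unchanged
        )))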

5. Modify the training parameters, mainly in configs/_base_/schedules/schedule_1x.py:
the learning rate lr and the maximum number of epochs max_epochs; adjust as needed (see the excerpt below).
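
A hedged excerpt of schedule_1x.py as it looks in mmdetection 2.x (exact contents vary by version); lr and max_epochs are the values this step refers to:

# Excerpt (mmdetection 2.x style) from configs/_base_/schedules/schedule_1x.py
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)  # default lr assumes 8 GPUs x 2 imgs/GPU; scale it down for fewer GPUs
optimizer_config = dict(grad_clip=None)
lr_config = dict(
    policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)  # raise or lower max_epochs as needed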

6. After the modifications, it is best to recompile with python setup.py develop before running.

7. Training commands

Multi-GPU:   bash ./tools/dist_train.sh configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py 2   (2 is the number of GPUs; change it to match your machine)
Single GPU:  python tools/train.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py

8. After training finishes, the outputs are stored in the work_dirs/faster_rcnn_r50_fpn_1x_coco/ folder.
   latest.pth is the final network model, and faster_rcnn_r50_fpn_1x_coco.py is the model and dataset config file; both are used for the test-set evaluation below.

9. Test-set evaluation

Multi-GPU:   ./tools/dist_test.sh ./work_dirs/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco.py ./work_dirs/faster_rcnn_r50_fpn_1x_coco/latest.pth 2 --eval bbox --options "classwise=True"
Single GPU:  python tools/test.py ./work_dirs/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco.py ./work_dirs/faster_rcnn_r50_fpn_1x_coco/latest.pth --eval bbox --options "classwise=True"

--eval bbox evaluates bbox mAP; --options "classwise=True" additionally reports the AP of each class.