Fine-Tuning
- Network architecture
  - A neural network can generally be split into two parts
    - A feature extractor that turns raw pixels into features that are easy to separate linearly
    - A linear classifier on top that performs the classification
- Fine-tuning
  - Idea: transfer the layers (and their trained parameters) that serve the same function directly, rather than relearning them from scratch; only the remaining layers are replaced and retrained
- Training
  - A normal training task on the target dataset, but with stronger regularization
    - Use a smaller learning rate
    - Use fewer training epochs
  - When the source dataset is much more complex than the target data, fine-tuning usually works better
- Reusing classifier weights
  - The source dataset may contain some of the labels present in the target data
  - The corresponding weight vectors from the pretrained model's classifier can then be used for initialization
- Freezing some layers: an effective way to reduce model complexity (see the sketch after this summary)
  - Neural networks usually learn hierarchical feature representations
    - Low-level features are more generic, so their weights can be frozen
    - High-level features are more dataset-specific
  - The parameters of the bottom layers can be frozen so they are not updated
    - This acts as a stronger form of regularization
- Summary
  - Fine-tuning improves accuracy by initializing the model weights from a model pretrained on a large dataset
  - The quality of the pretrained model matters a lot
  - Fine-tuning is usually faster and reaches higher accuracy than training from scratch
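A minimal sketch of freezing the bottom layers, assuming a torchvision ResNet-18 (freezing everything except the final classifier is one common choice; the exact cutoff is up to you):

import torchvision
from torch import nn

net = torchvision.models.resnet18(pretrained=True)
net.fc = nn.Linear(net.fc.in_features, 2)  # new task-specific head
# Frozen parameters receive no gradients and are skipped by the optimizer.
for name, param in net.named_parameters():
    if not name.startswith('fc'):
        param.requires_grad = False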
%matplotlib inline
import os
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
# Load the dataset
d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip',
'fba480ffa8aa7e0febbb511d181409f899b9baa5')
data_dir = d2l.download_extract('hotdog')
train_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'))
test_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'))
# Display some of the loaded images; note that their sizes vary
hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
d2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4);
# Data augmentation
normalize = torchvision.transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
# Normalize each channel: subtract the per-channel mean (0.485, 0.456, 0.406) and divide by the per-channel std (0.229, 0.224, 0.225). These are the ImageNet statistics the pretrained model expects, and keeping pixel values in a stable range helps training.
train_augs = torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(), normalize])
test_augs = torchvision.transforms.Compose([
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(), normalize])
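# Quick sanity check (not part of the original notebook): Normalize simply
# applies (x - mean) / std independently to each channel.
x = torch.rand(3, 224, 224)
mean = torch.tensor([0.485, 0.456, 0.406]).reshape(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).reshape(3, 1, 1)
print(torch.allclose(normalize(x), (x - mean) / std))  # expected: True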
finetune_net = torchvision.models.resnet18(pretrained=True)
# Load an existing model together with its pretrained parameters (pretrained=True)
finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2)
# Replace the final layer so the model outputs two classes
nn.init.xavier_uniform_(finetune_net.fc.weight);
# Randomly initialize the new final layer's weights before training
# Training in fine-tuning mode
def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5,
param_group=True):
train_iter = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'),
transform=train_augs),
batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(
torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'),
transform=test_augs),
batch_size=batch_size)
devices = d2l.try_all_gpus()
loss = nn.CrossEntropyLoss(reduction="none")
if param_group:
params_1x = [
param for name, param in net.named_parameters()
if name not in ["fc.weight", "fc.bias"]]
trainer = torch.optim.SGD([{
'params': params_1x}, {
'params': net.fc.parameters(),
'lr': learning_rate * 10}], lr=learning_rate,
weight_decay=0.001)
        # The new output layer uses 10x the base learning rate so it trains
        # quickly; all other (pretrained) layers use the small base rate.
else:
trainer = torch.optim.SGD(net.parameters(), lr=learning_rate,
weight_decay=0.001)
d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
devices)
# A small base learning rate, since the pretrained weights are already close to a good solution
train_fine_tuning(finetune_net, 5e-5)
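# For comparison (a sketch, not part of the notes above): train the same
# architecture from scratch. With random initialization there is no reason
# for a tiny learning rate, so a roughly 10x larger one is typical.
scratch_net = torchvision.models.resnet18()
scratch_net.fc = nn.Linear(scratch_net.fc.in_features, 2)
train_fine_tuning(scratch_net, 5e-4, param_group=False)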
Hands-on Kaggle Competition: Image Classification (CIFAR-10)
- URL: https://www.kaggle.com/c/cifar-10
- Code
!pip install d2l
!pip install matplotlib_inline
!pip install matplotlib==3.0.0
%matplotlib inline
import collections
import math
import os
import shutil
import pandas as pd
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
# Set the dataset path: the demo flag chooses between the tiny demo set and the full dataset
d2l.DATA_HUB['cifar10_tiny'] = (d2l.DATA_URL + 'kaggle_cifar10_tiny.zip',
'2068874e4b9a9f0fb07ebe0ad2b29754449ccacd')
demo = True
if demo:
data_dir = d2l.download_extract('cifar10_tiny')
else:
data_dir = '../data/cifar-10/'
# Organize the data
def read_csv_labels(fname):
    """Read fname and return a dict mapping file name to label."""
    with open(fname, 'r') as f:
        lines = f.readlines()[1:]  # skip the header line
    tokens = [l.rstrip().split(',') for l in lines]
    return dict(((name, label) for name, label in tokens))
labels = read_csv_labels(os.path.join(data_dir, 'trainLabels.csv'))
labels
# Split a validation set out of the original training set
def copyfile(filename, target_dir):
    """Copy a file into the target directory."""
    os.makedirs(target_dir, exist_ok=True)  # create the directory if it does not exist
    shutil.copy(filename, target_dir)  # copy the file into the target directory
def reorg_train_valid(data_dir, labels, valid_ratio):
    # n = number of examples in the least common class
    n = collections.Counter(labels.values()).most_common()[-1][1]
    # take floor(n * valid_ratio) validation examples per label, at least 1
    n_valid_per_label = max(1, math.floor(n * valid_ratio))
    label_count = {}
for train_file in os.listdir(os.path.join(data_dir, 'train')):
label = labels[train_file.split('.')[0]]
fname = os.path.join(data_dir, 'train', train_file)
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'train_valid', label))
if label not in label_count or label_count[label] < n_valid_per_label:
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'valid', label))
label_count[label] = label_count.get(label, 0) + 1
else:
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'train', label))
return n_valid_per_label
# Organize the test set
def reorg_test(data_dir):
for test_file in os.listdir(os.path.join(data_dir, 'test')):
copyfile(
os.path.join(data_dir, 'test', test_file),
os.path.join(data_dir, 'train_valid_test', 'test', 'unknown'))
def reorg_cifar10_data(data_dir, valid_ratio):
labels = read_csv_labels(os.path.join(data_dir, 'trainLabels.csv'))
reorg_train_valid(data_dir, labels, valid_ratio)
reorg_test(data_dir)
batch_size = 32 if demo else 128
valid_ratio = 0.1
reorg_cifar10_data(data_dir, valid_ratio)
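# Optional check (not part of the original notebook): count the files that
# ended up in each split produced above.
for split in ['train', 'valid', 'train_valid', 'test']:
    split_dir = os.path.join(data_dir, 'train_valid_test', split)
    n_files = sum(len(files) for _, _, files in os.walk(split_dir))
    print(split, n_files)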
# Data augmentation
transform_train = torchvision.transforms.Compose([
torchvision.transforms.Resize(40),
torchvision.transforms.RandomResizedCrop(32, scale=(0.64, 1.0),
ratio=(1.0, 1.0)),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
transform_test = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
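# The mean/std above are the commonly used CIFAR-10 statistics. A sketch of
# how they could be recomputed from the training images (on the tiny demo
# subset the values will differ somewhat):
raw_ds = torchvision.datasets.ImageFolder(
    os.path.join(data_dir, 'train_valid_test', 'train_valid'),
    transform=torchvision.transforms.ToTensor())
xs = torch.stack([img for img, _ in raw_ds])  # shape (N, 3, 32, 32)
print(xs.mean(dim=(0, 2, 3)), xs.std(dim=(0, 2, 3)))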
# Read the datasets built from the raw images
train_ds, train_valid_ds = [
torchvision.datasets.ImageFolder(
os.path.join(data_dir, 'train_valid_test', folder),
transform=transform_train) for folder in ['train', 'train_valid']]
# The training set needs augmentation, so it uses a different transform than the test set
valid_ds, test_ds = [
torchvision.datasets.ImageFolder(
os.path.join(data_dir, 'train_valid_test', folder),
transform=transform_test) for folder in ['valid', 'test']]
# Build data iterators that apply the transforms defined above
train_iter, train_valid_iter = [
torch.utils.data.DataLoader(dataset, batch_size, shuffle=True,
drop_last=True)
for dataset in (train_ds, train_valid_ds)]
valid_iter = torch.utils.data.DataLoader(valid_ds, batch_size, shuffle=False,
drop_last=True)
test_iter = torch.utils.data.DataLoader(test_ds, batch_size, shuffle=False,
drop_last=False)
# Model
def get_net():
num_classes = 10
net = d2l.resnet18(num_classes, 3)
    # 3 input channels, num_classes output classes
return net
loss = nn.CrossEntropyLoss(reduction="none")
# Training function
# lr_period, lr_decay: every lr_period epochs, multiply the learning rate by lr_decay
def train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
lr_decay):
trainer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9,
weight_decay=wd)
scheduler = torch.optim.lr_scheduler.StepLR(trainer, lr_period, lr_decay)
    # StepLR multiplies the learning rate by lr_decay after every lr_period epochs
num_batches, timer = len(train_iter), d2l.Timer()
legend = ['train loss', 'train acc']
if valid_iter is not None:
legend.append('valid acc')
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
legend=legend)
net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    # replicate the model across all available GPUs
for epoch in range(num_epochs):
net.train()
metric = d2l.Accumulator(3)
for i, (features, labels) in enumerate(train_iter):
timer.start()
l, acc = d2l.train_batch_ch13(net, features, labels, loss,
trainer, devices)
metric.add(l, acc, labels.shape[0])
timer.stop()
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(
epoch + (i + 1) / num_batches,
(metric[0] / metric[2], metric[1] / metric[2], None))
if valid_iter is not None:
valid_acc = d2l.evaluate_accuracy_gpu(net, valid_iter)
animator.add(epoch + 1, (None, None, valid_acc))
        scheduler.step()  # advance the learning-rate schedule (does not update the weights)
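# A small standalone check (not from the original notebook) of what StepLR
# does with the settings used below (lr_period=4, lr_decay=0.9): the learning
# rate is multiplied by 0.9 after every 4 epochs.
dummy_opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
dummy_sched = torch.optim.lr_scheduler.StepLR(dummy_opt, step_size=4, gamma=0.9)
for epoch in range(10):
    print(epoch, dummy_sched.get_last_lr()[0])  # 0.1 for epochs 0-3, then 0.09, ...
    dummy_opt.step()
    dummy_sched.step()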
# Train and validate
devices, num_epochs, lr, wd = d2l.try_all_gpus(), 50, 2e-4, 5e-4
lr_period, lr_decay, net = 4, 0.9, get_net()
train(net, train_iter, valid_iter, num_epochs, lr, wd, devices, lr_period,
lr_decay)
# Classify the test set and produce the submission file
net, preds = get_net(), []
train(net, train_valid_iter, None, num_epochs, lr, wd, devices, lr_period,
lr_decay)
for X, _ in test_iter:
y_hat = net(X.to(devices[0]))
preds.extend(y_hat.argmax(dim=1).type(torch.int32).cpu().numpy())
# ImageFolder sorts file names as strings, so sort the ids the same way
sorted_ids = list(range(1, len(test_ds) + 1))
sorted_ids.sort(key=lambda x: str(x))
df = pd.DataFrame({'id': sorted_ids, 'label': preds})
df['label'] = df['label'].apply(lambda x: train_valid_ds.classes[x])
df.to_csv('submission.csv', index=False)
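# Optional sanity check (not part of the original notebook): Kaggle expects
# an id column and a label column.
print(pd.read_csv('submission.csv').head())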