PyTorch original tutorial:
https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
Reference article:
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion()  # turn on matplotlib's interactive mode so figures update without blocking
# Data augmentation and normalization for training
# Just normalization for validation
# torchvision.transforms provides common image transformation classes; Compose is typically used to chain several steps into one pipeline
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),  # take a random crop of the PIL Image, then resize it to 224x224
        transforms.RandomHorizontalFlip(),  # flip the PIL Image horizontally with probability 0.5, i.e. half the images are flipped
        transforms.ToTensor(),  # convert a PIL Image or ndarray to a tensor scaled to [0, 1], shape (C, H, W)
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # per-channel (R, G, B) mean and std: normalized_image = (image - mean) / std
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),  # resize the input PIL Image so its shorter side is 256
        transforms.CenterCrop(224),  # crop the PIL Image at the center
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # per-channel standardization
    ]),
}
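# A quick sanity check (added here, not in the original tutorial): run the training
# pipeline on a dummy PIL image and confirm the output shape and dtype.
from PIL import Image
_dummy = Image.fromarray(np.uint8(np.random.rand(500, 375, 3) * 255))
_x = data_transforms['train'](_dummy)
print(_x.shape, _x.dtype)  # expected: torch.Size([3, 224, 224]) torch.float32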
# Tensor vs. NumPy summary
# (1) Both hold n-dimensional arrays; the difference is that a Tensor can also run on the GPU, while a NumPy array runs on the CPU only.
# (2) Converting between Tensor and NumPy is straightforward, and their dtypes are largely compatible.
# (3) Printing a Tensor shows its dtype whenever it is not the default, while printing a NumPy array does not; a short demo follows.
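# A minimal illustration (added) of points (1)-(3): converting between Tensor and
# NumPy. On CPU, .numpy() and torch.from_numpy() share memory rather than copying.
_t = torch.ones(2, 3)
_a = _t.numpy()             # Tensor -> ndarray (shared memory, CPU tensors only)
_t2 = torch.from_numpy(_a)  # ndarray -> Tensor (also zero-copy)
_a += 1                     # mutating the array is visible through both tensors
print(_t, _t2, sep='\n')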
# Visualize a few images
# Let’s visualize a few training images so as to understand the data augmentations.
def imshow(inp, title=None):
    """Imshow for Tensor."""
    # transpose reorders the axes from (C, H, W) back to (H, W, C) for matplotlib
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    # undo the Normalize transform: image = tensor * std + mean
    inp = std * inp + mean
    # np.clip(a, a_min, a_max, out=None) limits every value in a to the range [a_min, a_max]
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(1)  # pause a bit so that plots are updated
data_dir = 'hymenoptera_data'
# ImageFolder's first argument is the directory to search for images; the second, transform, is the transformation applied to each image
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# DataLoader's first argument is the dataset; batch_size is the number of samples per batch; shuffle reshuffles the data at the start of each epoch
# num_workers (int, optional) sets how many subprocesses handle data loading; 0 means all data is loaded in the main process (the default)
# NOTE: num_workers must be 0 here, otherwise the script crashes on my machine (a common issue on Windows when the code is not guarded by `if __name__ == '__main__':`)
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=0) for x in ['train', 'val']}
# Sizes of the training and validation sets
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
# Class names, derived from the dataset's subfolder names
class_names = image_datasets['train'].classes
# Check whether CUDA is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
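# Small demo (added): tensors and modules are moved with .to(device); this is a
# no-op on CPU and a copy into GPU memory when CUDA is available.
_probe = torch.zeros(1).to(device)
print(_probe.device)  # cpu, or cuda:0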
# ------------------- Visualize a few images --------------------- #
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from the batch
# make_grid tiles a batch of images into a single image; its padding argument sets the gap between neighboring tiles
# make_grid still returns a Tensor in (C, H, W) layout; imshow transposes it back to (H, W, C), then multiplies by the std and adds the mean
out = torchvision.utils.make_grid(inputs)
# imshow(out, title=[class_names[x] for x in classes])
# Load a pretrained model; with pretrained=False (or omitted) only the network structure is created, without the trained weights
# model_ft is thus a residual network with pretrained parameters
model_ft = models.resnet18(pretrained=True)
# Input dimension of the final fully connected layer; for resnet18 this is 512
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
# Replace the final fully connected layer, changing it from (512, 1000) to (512, 2)
model_ft.fc = nn.Linear(num_ftrs, 2)
# Move the model's parameters and buffers to the device chosen above; subsequent computation runs there
model_ft = model_ft.to(device)
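# Quick shape check (added): the replaced head now maps 512 features to 2 class scores.
# eval() avoids updating the BatchNorm running statistics during this probe.
model_ft.eval()
with torch.no_grad():
    print(model_ft(torch.randn(1, 3, 224, 224).to(device)).shape)  # torch.Size([1, 2])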
# Define the loss function
criterion = nn.CrossEntropyLoss()
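# Note (added): CrossEntropyLoss expects raw logits and applies log_softmax itself;
# it is equivalent to NLLLoss on log-probabilities, as this small check shows.
_logits = torch.tensor([[2.0, 0.5]])
_target = torch.tensor([0])
print(criterion(_logits, _target))
print(nn.NLLLoss()(torch.log_softmax(_logits, dim=1), _target))  # same value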
# Observe that all parameters are being optimized
# An optimizer updates the network's parameters during training, speeding up convergence and saving training time
# To optimize the network, first construct an optimizer
# SGD (Stochastic Gradient Descent) splits the data into mini-batches and feeds one batch per step; a single batch does not
# reflect the whole dataset, but this greatly accelerates training without giving up much accuracy
# With plain SGD, a parameter W is updated by adding the negative learning rate times the gradient (dx)
# Momentum is like moving from flat ground onto a slope: once rolling downhill, inertia keeps the update moving in that direction with fewer detours; a sketch of the update rule follows the optimizer below
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
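# A minimal sketch (added) of the update rule optim.SGD applies per parameter when
# momentum is set (assuming dampening=0 and weight_decay=0, the defaults used here):
#   buf   <- momentum * buf + grad
#   param <- param - lr * buf
def _sgd_momentum_step(param, grad, buf, lr=0.001, momentum=0.9):
    buf = momentum * buf + grad  # the velocity buffer accumulates past gradients
    return param - lr * buf, buf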
# Decay LR by a factor of 0.1 every 7 epochs
# lr_scheduler provides the base class on which all learning-rate schedules build
# it offers several policies that adjust the learning rate based on the number of epochs
# step_size (int) is the decay period, i.e. how many epochs pass between decays; gamma is the multiplicative decay factor
# here the learning rate is multiplied by 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
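# Illustration (added): how StepLR decays the learning rate. Throwaway objects are
# used so the real exp_lr_scheduler above is left untouched.
_demo_opt = optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.001)
_demo_sched = lr_scheduler.StepLR(_demo_opt, step_size=7, gamma=0.1)
_lrs = []
for _ in range(15):
    _lrs.append(_demo_sched.get_last_lr()[0])
    _demo_sched.step()
print(_lrs)  # 0.001 for epochs 0-6, 0.0001 for epochs 7-13, then 1e-05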
# Number of epochs to train for
num_epochs = 25
# ----------------------- Train the model ---------------------- #
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    # record the time training starts
    since = time.time()
    # keep a copy of the current model parameters; replaced whenever a better model is found
    best_model_wts = copy.deepcopy(model.state_dict())
    # best accuracy so far
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients,
                # i.e. clear the gradients of the loss w.r.t. the weights
                # so that the next step does not accumulate stale updates
                optimizer.zero_grad()
                # forward
                # track history only in train
                # set_grad_enabled is a context manager that turns gradient tracking on or off
                # based on its boolean argument; useful when gradients are needed only conditionally
                with torch.set_grad_enabled(phase == 'train'):
                    # forward pass to get predictions
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    # compute the loss
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        # backpropagate the error to compute parameter updates
                        loss.backward()
                        # apply the updates to the network's parameters
                        optimizer.step()
                # statistics
                # loss.item() is the mean loss over the batch, so scale by the batch size
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                # step the LR schedule: updates the 'lr' entry of each of the optimizer's param_groups
                # called once per training epoch
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            # keep the weights whenever validation finds a better model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                # copy the model parameters
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    # report the total training time
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
# ----------------- visualizing the model predictions ----------------- #
def visualize_model(model, num_images=6):
    # remember whether the model was in training mode
    was_training = model.training
    # switch to evaluation mode
    model.eval()
    images_so_far = 0
    fig = plt.figure()
    # operations inside don't track history,
    # so no computation graph is kept for backward and memory use stays low
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images // 2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])
                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
visualize_model(model_ft)
# ----------------------------------- Partial fine-tuning --------------------------------------- #
# ConvNet as fixed feature extractor
# Freeze all layers except the final fully connected one. We set requires_grad = False to freeze the parameters so that no gradients are computed for them in the backward pass
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
    # disable gradient computation for every pretrained parameter
    param.requires_grad = False
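# Sanity check (added): after freezing, nothing in the backbone is trainable;
# the new fc layer constructed below re-introduces trainable parameters.
print(sum(p.numel() for p in model_conv.parameters() if p.requires_grad))  # 0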
# Parameters of newly constructed modules have requires_grad=True by default
# Input dimension of the final fully connected layer
num_ftrs = model_conv.fc.in_features
# Replace the final fully connected layer, changing it from (512, 1000) to (512, 2)
model_conv.fc = nn.Linear(num_ftrs, 2)
model_conv = model_conv.to(device)
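# Sanity check (added): only the parameters of the new head require gradients.
print([n for n, p in model_conv.named_parameters() if p.requires_grad])
# expected: ['fc.weight', 'fc.bias']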
criterion = nn.CrossEntropyLoss()
# Observe that only parameters of final layer are being optimized as opposed to before.
# NOTE: only the parameters of the fully connected layer are optimized here
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
# On CPU this will take about half the time compared to previous scenario.
# This is expected as gradients don’t need to be computed for most of the network. However, forward does need to be computed.
model_conv = train_model(model_conv, criterion, optimizer_conv, exp_lr_scheduler, num_epochs=25)
visualize_model(model_conv)
# Turn off interactive mode before the final show
plt.ioff()
plt.show()
I am still a beginner and mainly followed other people's code here; corrections are welcome.