Original: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
In this tutorial, you will learn how to train a convolutional neural network for image classification using transfer learning. You can read more about transfer learning in the cs231n notes.
Quoting these notes:

In practice, very few people train an entire Convolutional Network from scratch (with random initialization), because it is relatively rare to have a dataset of sufficient size. Instead, it is common to pretrain a ConvNet on a very large dataset (e.g. ImageNet, which contains 1.2 million images with 1000 categories), and then use the ConvNet either as an initialization or a fixed feature extractor for the task of interest.
These two major transfer learning scenarios look as follows:

- Finetuning the convnet: instead of random initialization, we initialize the network with a pretrained network, such as one trained on the imagenet 1000 dataset; the rest of the training looks as usual.
- ConvNet as fixed feature extractor: here, we freeze the weights for all of the network except the final fully connected layer, replace that last layer with a new one with random weights, and train only this layer.
# License: BSD
# Author: Sasank Chilamkurthy
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
plt.ion() # interactive mode
We will use the torchvision and torch.utils.data packages to load the data.
The problem we are going to solve today is to train a model to classify ants and bees. We have about 120 training images each for ants and bees, and 75 validation images for each class. Usually this is a very small dataset to generalize on if trained from scratch. Since we are using transfer learning, we should be able to generalize reasonably well.
This dataset is a very small subset of imagenet.
Note

Download the data from here and extract it to the current directory.
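If you prefer to fetch the dataset from a script instead, here is a minimal sketch using only the standard library. The archive URL is the one the official tutorial page links to; treat it as an assumption and adjust it if you work from a mirror. Note that ImageFolder (used below) infers class labels from directory names, so the loading code expects the extracted layout data/hymenoptera_data/{train,val}/{ants,bees}.

import os
import urllib.request
import zipfile

# Assumption: this is the archive linked from the official tutorial page.
DATA_URL = 'https://download.pytorch.org/tutorial/hymenoptera_data.zip'

if not os.path.isdir('data/hymenoptera_data'):
    os.makedirs('data', exist_ok=True)
    archive, _ = urllib.request.urlretrieve(DATA_URL, 'hymenoptera_data.zip')
    with zipfile.ZipFile(archive) as zf:
        # extracts to data/hymenoptera_data/{train,val}/{ants,bees}
        zf.extractall('data')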
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = 'data/hymenoptera_data'
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                          data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                              shuffle=True, num_workers=4)
               for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
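As a quick sanity check (not part of the original tutorial), you can print what was just built. The counts shown in the comments are derived from the accuracy denominators in the training log further down, so expect roughly these numbers with the tutorial archive.

# Sanity check: both classes and both splits should have been found.
print(class_names)    # ['ants', 'bees']
print(dataset_sizes)  # {'train': 244, 'val': 153} for the tutorial archive
print(device)         # cuda:0 if a GPU is available, otherwise cpu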
Let's visualize a few training images so as to understand the data augmentations.
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean  # undo the Normalize transform for display
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated

# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))

# Make a grid from batch
out = torchvision.utils.make_grid(inputs)

imshow(out, title=[class_names[x] for x in classes])
Now, let's write a general function to train a model. Here, we will illustrate:

- scheduling the learning rate
- saving the best model

In the following, the parameter scheduler is an LR scheduler object from torch.optim.lr_scheduler.
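Before defining the training loop, it may help to see what a StepLR scheduler actually does. The standalone sketch below (illustrative only, reusing the imports from the top of the script) drives a throwaway optimizer for 15 epochs; with step_size=7 and gamma=0.1, the learning rate is multiplied by 0.1 every 7 epochs.

# Illustrative only: how StepLR(step_size=7, gamma=0.1) decays the LR.
_params = [torch.zeros(1, requires_grad=True)]
_opt = optim.SGD(_params, lr=0.001, momentum=0.9)
_sched = lr_scheduler.StepLR(_opt, step_size=7, gamma=0.1)
for _epoch in range(15):
    # 0.001 for epochs 0-6, 0.0001 for epochs 7-13, 0.00001 from epoch 14
    print(_epoch, _opt.param_groups[0]['lr'])
    _opt.step()    # optimizer step first, ...
    _sched.step()  # ... then one scheduler step per epoch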
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # zero the parameter gradients
                optimizer.zero_grad()

                # forward
                # track history only if in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()

            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

            # deep copy the model
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
A generic function to display predictions for a few images:
def visualize_model(model, num_images=6):
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure()

    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            for j in range(inputs.size()[0]):
                images_so_far += 1
                ax = plt.subplot(num_images//2, 2, images_so_far)
                ax.axis('off')
                ax.set_title('predicted: {}'.format(class_names[preds[j]]))
                imshow(inputs.cpu().data[j])

                if images_so_far == num_images:
                    model.train(mode=was_training)
                    return
        model.train(mode=was_training)
Load a pretrained model and reset the final fully connected layer.
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
# Here the size of each output sample is set to 2.
# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).
model_ft.fc = nn.Linear(num_ftrs, 2)

model_ft = model_ft.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
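Since no parameter was frozen here, the optimizer updates the whole network. A short check (not in the original tutorial) makes that concrete:

# Illustrative: in the finetuning scenario every parameter is trainable.
num_total = sum(p.numel() for p in model_ft.parameters())
num_trainable = sum(p.numel() for p in model_ft.parameters() if p.requires_grad)
print('{}/{} parameters will be updated'.format(num_trainable, num_total))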
It should take around 15-25 minutes on CPU. On GPU, though, it takes less than a minute.
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                       num_epochs=25)
Out:
Epoch 0/24
----------
train Loss: 0.5582 Acc: 0.6967
val Loss: 0.1987 Acc: 0.9216
Epoch 1/24
----------
train Loss: 0.4663 Acc: 0.8238
val Loss: 0.2519 Acc: 0.8889
Epoch 2/24
----------
train Loss: 0.5978 Acc: 0.7623
val Loss: 1.2933 Acc: 0.6601
Epoch 3/24
----------
train Loss: 0.4471 Acc: 0.8320
val Loss: 0.2576 Acc: 0.8954
Epoch 4/24
----------
train Loss: 0.3654 Acc: 0.8115
val Loss: 0.2977 Acc: 0.9150
Epoch 5/24
----------
train Loss: 0.4404 Acc: 0.8197
val Loss: 0.3330 Acc: 0.8627
Epoch 6/24
----------
train Loss: 0.6416 Acc: 0.7623
val Loss: 0.3174 Acc: 0.8693
Epoch 7/24
----------
train Loss: 0.4058 Acc: 0.8361
val Loss: 0.2551 Acc: 0.9085
Epoch 8/24
----------
train Loss: 0.2294 Acc: 0.9098
val Loss: 0.2603 Acc: 0.9085
Epoch 9/24
----------
train Loss: 0.2805 Acc: 0.8730
val Loss: 0.2765 Acc: 0.8954
Epoch 10/24
----------
train Loss: 0.3139 Acc: 0.8525
val Loss: 0.2639 Acc: 0.9020
Epoch 11/24
----------
train Loss: 0.3198 Acc: 0.8648
val Loss: 0.2458 Acc: 0.9020
Epoch 12/24
----------
train Loss: 0.2947 Acc: 0.8811
val Loss: 0.2835 Acc: 0.8889
Epoch 13/24
----------
train Loss: 0.3097 Acc: 0.8730
val Loss: 0.2542 Acc: 0.9085
Epoch 14/24
----------
train Loss: 0.1849 Acc: 0.9303
val Loss: 0.2710 Acc: 0.9085
Epoch 15/24
----------
train Loss: 0.2764 Acc: 0.8934
val Loss: 0.2522 Acc: 0.9085
Epoch 16/24
----------
train Loss: 0.2214 Acc: 0.9098
val Loss: 0.2620 Acc: 0.9085
Epoch 17/24
----------
train Loss: 0.2949 Acc: 0.8525
val Loss: 0.2600 Acc: 0.9085
Epoch 18/24
----------
train Loss: 0.2237 Acc: 0.9139
val Loss: 0.2666 Acc: 0.9020
Epoch 19/24
----------
train Loss: 0.2456 Acc: 0.8852
val Loss: 0.2521 Acc: 0.9150
Epoch 20/24
----------
train Loss: 0.2351 Acc: 0.8852
val Loss: 0.2781 Acc: 0.9085
Epoch 21/24
----------
train Loss: 0.2654 Acc: 0.8730
val Loss: 0.2560 Acc: 0.9085
Epoch 22/24
----------
train Loss: 0.1955 Acc: 0.9262
val Loss: 0.2605 Acc: 0.9020
Epoch 23/24
----------
train Loss: 0.2285 Acc: 0.8893
val Loss: 0.2650 Acc: 0.9085
Epoch 24/24
----------
train Loss: 0.2360 Acc: 0.9221
val Loss: 0.2690 Acc: 0.8954
Training complete in 1m 7s
Best val Acc: 0.921569
visualize_model(model_ft)
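The tutorial only keeps the best weights in memory (via the copy.deepcopy inside train_model). If you also want to persist them to disk, here is a minimal sketch; the file name best_model_ft.pth is a hypothetical choice.

# Optional (not in the original tutorial): persist the best weights.
torch.save(model_ft.state_dict(), 'best_model_ft.pth')  # hypothetical path
# Later, rebuild the same architecture and load them back:
# model_ft.load_state_dict(torch.load('best_model_ft.pth'))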
Here, we need to freeze all of the network except the final layer. We need to set requires_grad == False to freeze the parameters so that the gradients are not computed in backward().
You can read more about this in the documentation here.
model_conv = torchvision.models.resnet18(pretrained=True)
for param in model_conv.parameters():
    param.requires_grad = False

# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 2)

model_conv = model_conv.to(device)

criterion = nn.CrossEntropyLoss()

# Observe that only parameters of final layer are being optimized as
# opposed to before.
optimizer_conv = optim.SGD(model_conv.fc.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=7, gamma=0.1)
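As a sanity check (not part of the original tutorial), you can list which parameters still require gradients; only the weight and bias of the newly constructed fc layer should appear:

# Sanity check: only the new fc layer should still require gradients.
for name, param in model_conv.named_parameters():
    if param.requires_grad:
        print(name)  # expected output: fc.weight, fc.bias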
Compared with the previous scenario, this should take about half the time on CPU. This is expected, since gradients don't need to be computed for most of the network. However, the forward pass does still need to be computed.
model_conv = train_model(model_conv, criterion, optimizer_conv,
                         exp_lr_scheduler, num_epochs=25)
Out:
Epoch 0/24
----------
train Loss: 0.5633 Acc: 0.7008
val Loss: 0.2159 Acc: 0.9412
Epoch 1/24
----------
train Loss: 0.4394 Acc: 0.7623
val Loss: 0.2000 Acc: 0.9150
Epoch 2/24
----------
train Loss: 0.5182 Acc: 0.7623
val Loss: 0.1897 Acc: 0.9346
Epoch 3/24
----------
train Loss: 0.3993 Acc: 0.8074
val Loss: 0.3029 Acc: 0.8824
Epoch 4/24
----------
train Loss: 0.4163 Acc: 0.8607
val Loss: 0.2190 Acc: 0.9412
Epoch 5/24
----------
train Loss: 0.4741 Acc: 0.7951
val Loss: 0.1903 Acc: 0.9477
Epoch 6/24
----------
train Loss: 0.4266 Acc: 0.8115
val Loss: 0.2178 Acc: 0.9281
Epoch 7/24
----------
train Loss: 0.3623 Acc: 0.8238
val Loss: 0.2080 Acc: 0.9412
Epoch 8/24
----------
train Loss: 0.3979 Acc: 0.8279
val Loss: 0.1796 Acc: 0.9412
Epoch 9/24
----------
train Loss: 0.3534 Acc: 0.8648
val Loss: 0.2043 Acc: 0.9412
Epoch 10/24
----------
train Loss: 0.3849 Acc: 0.8115
val Loss: 0.2012 Acc: 0.9346
Epoch 11/24
----------
train Loss: 0.3814 Acc: 0.8361
val Loss: 0.2088 Acc: 0.9412
Epoch 12/24
----------
train Loss: 0.3443 Acc: 0.8648
val Loss: 0.1823 Acc: 0.9477
Epoch 13/24
----------
train Loss: 0.2931 Acc: 0.8525
val Loss: 0.1853 Acc: 0.9477
Epoch 14/24
----------
train Loss: 0.2749 Acc: 0.8811
val Loss: 0.2068 Acc: 0.9412
Epoch 15/24
----------
train Loss: 0.3387 Acc: 0.8566
val Loss: 0.2080 Acc: 0.9477
Epoch 16/24
----------
train Loss: 0.2992 Acc: 0.8648
val Loss: 0.2096 Acc: 0.9346
Epoch 17/24
----------
train Loss: 0.3396 Acc: 0.8648
val Loss: 0.1870 Acc: 0.9412
Epoch 18/24
----------
train Loss: 0.3956 Acc: 0.8320
val Loss: 0.1858 Acc: 0.9412
Epoch 19/24
----------
train Loss: 0.3379 Acc: 0.8402
val Loss: 0.1729 Acc: 0.9542
Epoch 20/24
----------
train Loss: 0.2555 Acc: 0.8811
val Loss: 0.2186 Acc: 0.9281
Epoch 21/24
----------
train Loss: 0.3764 Acc: 0.8484
val Loss: 0.1817 Acc: 0.9477
Epoch 22/24
----------
train Loss: 0.2747 Acc: 0.8975
val Loss: 0.2042 Acc: 0.9412
Epoch 23/24
----------
train Loss: 0.3072 Acc: 0.8689
val Loss: 0.1924 Acc: 0.9477
Epoch 24/24
----------
train Loss: 0.3479 Acc: 0.8402
val Loss: 0.1835 Acc: 0.9477
Training complete in 0m 34s
Best val Acc: 0.954248
visualize_model(model_conv)
plt.ioff()
plt.show()
If you would like to learn more about the applications of transfer learning, check out our Quantized Transfer Learning for Computer Vision Tutorial.
Total running time of the script: (1 minutes 53.551 seconds)
Download Python source code: transfer_learning_tutorial.py
Download Jupyter notebook: transfer_learning_tutorial.ipynb