这篇文章主要记录 深度学习——nn.MaxPool2d 的使用 的学习过程,方便后续快速回顾核心概念、代码写法与实验细节。

核心代码


import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# 池化的作用是为了:降维、去除冗余信息、对特征进行压缩、简化网络复杂度、减小计算量、减小内存消耗

# 定义神经网络操作
class Mypool(nn.Module):
    """A minimal module wrapping a single 3x3 max-pooling layer.

    Pooling downsamples feature maps: it compresses features, discards
    redundant information, and reduces computation and memory cost.
    """

    def __init__(self):
        super().__init__()
        # ceil_mode=False drops incomplete windows at the right/bottom
        # border (True would round the output size up and keep them).
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)

    def forward(self, input):
        # Stride defaults to kernel_size, so windows do not overlap.
        return self.maxpool1(input)

# Load the CIFAR-10 test split from the local directory (no download).
dataset = torchvision.datasets.CIFAR10(
    root="data-maxpool",
    train=False,
    download=False,
    transform=torchvision.transforms.ToTensor(),
)
dataLoader = DataLoader(dataset, batch_size=64, shuffle=True)

# Model under inspection and the TensorBoard writer it logs to.
mypool = Mypool()
writer = SummaryWriter("log-maxpool")
step = 0

# Log every batch before and after pooling so the downsampling
# effect can be compared side by side in TensorBoard.
for img, tag in dataLoader:
    writer.add_images("input", img, step)
    writer.add_images("output", mypool(img), step)
    step += 1

# Flush and close the event file.
writer.close()