```python
        self.flatten = nn.Flatten()
        self.line = nn.Linear(120, 84)
        self.output = nn.Sequential(
            nn.Linear(in_features=84, out_features=2, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        x = self.RELU(self.conv1(x))
        x = self.avgpool(x)
        x = self.RELU(self.conv2(x))
        x = self.avgpool2(x)
        x = self.conv3(x)
        x = self.flatten(x)
        x = self.line(x)
        return self.output(x)
```
The architecture has a few small changes, for example:
the activation function is switched to ReLU;
the input layer takes three channels instead of one.
These changes are minor and do not affect the feature-map visualization.
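For reference, here is a minimal sketch of what the full modified class might look like. The exact kernel sizes, padding, and the `RELU`/`avgpool` attribute definitions are assumptions inferred from the forward pass above, not the original code.

```python
import torch.nn as nn

class LeNet(nn.Module):
    # Sketch only: layer shapes follow the classic LeNet-5 layout for a
    # 28x28, 3-channel input; the real definitions may differ slightly.
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, kernel_size=5, padding=2)  # 3-channel input (assumed)
        self.avgpool = nn.AvgPool2d(2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.avgpool2 = nn.AvgPool2d(2)
        self.conv3 = nn.Conv2d(16, 120, kernel_size=5)
        self.RELU = nn.ReLU()
        self.flatten = nn.Flatten()
        self.line = nn.Linear(120, 84)
        self.output = nn.Sequential(
            nn.Linear(in_features=84, out_features=2, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        x = self.RELU(self.conv1(x))
        x = self.avgpool(x)
        x = self.RELU(self.conv2(x))
        x = self.avgpool2(x)
        x = self.conv3(x)
        x = self.flatten(x)
        x = self.line(x)
        return self.output(x)
```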
Imports
```python
import torch
import matplotlib.pyplot as plt
import cv2 as cv
from torch.autograd import Variable
from torchvision import transforms
from LeNet import LeNet  # the network definition lives in LeNet.py, so it is imported from that file
```
First, we need to instantiate the network and load the previously trained weight file:
```python
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LeNet().eval()
model.load_state_dict(torch.load("LeNet_model.pth", map_location="cpu"))
```
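As an optional sanity check (my addition, not part of the original post), `load_state_dict` returns a report of missing and unexpected keys, which is an easy way to confirm the weight file actually matches the architecture; the variable name `report` is just illustrative:

```python
# Optional: inspect the key-matching report returned by load_state_dict.
report = model.load_state_dict(torch.load("LeNet_model.pth", map_location="cpu"))
print(report)  # prints "<All keys matched successfully>" when everything lines up
```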
Next, recursively walk through the model's children and collect every `Conv2d` layer together with its weights; nested `Sequential` containers are searched as well:

```python
model_weights = []  # the lists must exist before getConv() is called
conv_layers = []

def getConv(children):
    for child in children:
        if child.__class__.__name__ == "Conv2d":
            model_weights.append(child.weight)
            conv_layers.append(child)
        elif child.__class__.__name__ == "Sequential":
            getConv(child.children())

getConv(model.children())
```
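As a quick check (not in the original post), you can print what was collected; with the three-convolution LeNet sketched above this should report three layers, with weight shapes like the ones in the comment (assumed, depending on your exact layer definitions):

```python
# Hypothetical check: confirm the recursion found every Conv2d layer.
print(f"found {len(conv_layers)} conv layers")
for w in model_weights:
    print(tuple(w.shape))  # e.g. (6, 3, 5, 5) for a 3-channel 5x5 conv1
```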
Putting everything together, the complete script:

```python
import torch
import matplotlib.pyplot as plt
import cv2 as cv
from torch.autograd import Variable  # Variable is a no-op wrapper in modern PyTorch; the raw tensor works as well
from torchvision import transforms
from LeNet import LeNet


def getConv(children):
    for child in children:
        if child.__class__.__name__ == "Conv2d":
            model_weights.append(child.weight)
            conv_layers.append(child)
        elif child.__class__.__name__ == "Sequential":
            getConv(child.children())


if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = LeNet().eval()
    model.load_state_dict(torch.load("LeNet_model.pth", map_location="cpu"))

    children = model.children()
    model_weights = []
    conv_layers = []
    getConv(children)
    model.to(device)

    transform = transforms.Compose(
        [
            transforms.ToPILImage(),
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
        ]
    )

    image = cv.imread("20160330_170504_473.jpg")
    transform_image = transform(image)
    # keep the input on the same device as the model
    x = Variable(torch.unsqueeze(transform_image, dim=0).float(), requires_grad=False).to(device)

    # run the image through the conv layers only, recording each layer's output
    outputs = []
    names = []
    for layer in conv_layers:
        x = layer(x)
        outputs.append(x)
        names.append(layer.__class__.__name__)

    # average each output over its channel dimension to get one grayscale map per layer
    results = []
    for output in outputs:
        output = output.squeeze(0)
        gray_scale = torch.sum(output, 0) / output.shape[0]
        results.append(gray_scale.detach().cpu().numpy())

    for result in results:
        plt.imshow(result)
        plt.show()
```
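A small optional extension (my addition): instead of popping up one window per layer, the per-layer maps can be laid out side by side in a single figure for easier comparison; this assumes at least two conv layers were collected:

```python
# Optional sketch: show all averaged feature maps in one figure.
fig, axes = plt.subplots(1, len(results), figsize=(4 * len(results), 4))
for i, (ax, result) in enumerate(zip(axes, results)):
    ax.imshow(result, cmap="gray")
    ax.set_title(f"conv layer {i + 1}")
    ax.axis("off")
plt.show()
```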