Is there any way I can print the summary of a model in PyTorch like the model.summary() method does in Keras, as follows?
Model Summary:
___________
Keras-like model summary using torchsummary:
from torchsummary import summary
summary(model, input_size=(3, 224, 224))
In order to use torchsummary, type:
from torchsummary import summary
Install it first if you don't have it:
pip install torchsummary
Then you can try it, but note that for some reason it does not work unless the model is set to CUDA with alexnet.cuda():
from torchsummary import summary
help(summary)
import torchvision.models as models
alexnet = models.alexnet(pretrained=False)
alexnet.cuda()
summary(alexnet, (3, 224, 224))
print(alexnet)
The summary must take the input size, and the batch size defaults to -1, meaning it accepts any batch size we provide.
If we set summary(alexnet, (3, 224, 224), 32), this means use bs=32.
summary(model, input_size, batch_size=-1, device='cuda')
Out:
Help on function summary in module torchsummary.torchsummary:
summary(model, input_size, batch_size=-1, device='cuda')
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1           [32, 64, 55, 55]          23,296
              ReLU-2           [32, 64, 55, 55]               0
         MaxPool2d-3           [32, 64, 27, 27]               0
            Conv2d-4          [32, 192, 27, 27]         307,392
              ReLU-5          [32, 192, 27, 27]               0
         MaxPool2d-6          [32, 192, 13, 13]               0
            Conv2d-7          [32, 384, 13, 13]         663,936
              ReLU-8          [32, 384, 13, 13]               0
            Conv2d-9          [32, 256, 13, 13]         884,992
             ReLU-10          [32, 256, 13, 13]               0
           Conv2d-11          [32, 256, 13, 13]         590,080
             ReLU-12          [32, 256, 13, 13]               0
        MaxPool2d-13            [32, 256, 6, 6]               0
AdaptiveAvgPool2d-14            [32, 256, 6, 6]               0
          Dropout-15                 [32, 9216]               0
           Linear-16                 [32, 4096]      37,752,832
             ReLU-17                 [32, 4096]               0
          Dropout-18                 [32, 4096]               0
           Linear-19                 [32, 4096]      16,781,312
             ReLU-20                 [32, 4096]               0
           Linear-21                 [32, 1000]       4,097,000
================================================================
Total params: 61,100,840
Trainable params: 61,100,840
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 18.38
Forward/backward pass size (MB): 268.12
Params size (MB): 233.08
Estimated Total Size (MB): 519.58
----------------------------------------------------------------
AlexNet(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    (1): ReLU(inplace)
    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (4): ReLU(inplace)
    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (7): ReLU(inplace)
    (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (9): ReLU(inplace)
    (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (11): ReLU(inplace)
    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))
  (classifier): Sequential(
    (0): Dropout(p=0.5)
    (1): Linear(in_features=9216, out_features=4096, bias=True)
    (2): ReLU(inplace)
    (3): Dropout(p=0.5)
    (4): Linear(in_features=4096, out_features=4096, bias=True)
    (5): ReLU(inplace)
    (6): Linear(in_features=4096, out_features=1000, bias=True)
  )
)
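If you would rather not move the model to the GPU, the device argument shown in the signature above can be set to 'cpu' instead of calling alexnet.cuda(). A minimal sketch, assuming torchsummary 1.5.x and a model that lives on the CPU:
from torchsummary import summary
import torchvision.models as models

alexnet = models.alexnet(pretrained=False)  # model stays on the CPU
# ask torchsummary to build CPU input tensors instead of CUDA ones
summary(alexnet, (3, 224, 224), device='cpu')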
This will show a model's weights and parameters (but not output shape).
from torch.nn.modules.module import _addindent
import torch
import numpy as np


def torch_summarize(model, show_weights=True, show_parameters=True):
    """Summarizes torch model by showing trainable parameters and weights."""
    tmpstr = model.__class__.__name__ + ' (\n'
    for key, module in model._modules.items():
        # if it contains layers, call it recursively to get params and weights
        if type(module) in [
            torch.nn.modules.container.Container,
            torch.nn.modules.container.Sequential
        ]:
            modstr = torch_summarize(module)
        else:
            modstr = module.__repr__()
        modstr = _addindent(modstr, 2)

        params = sum([np.prod(p.size()) for p in module.parameters()])
        weights = tuple([tuple(p.size()) for p in module.parameters()])

        tmpstr += '  (' + key + '): ' + modstr
        if show_weights:
            tmpstr += ', weights={}'.format(weights)
        if show_parameters:
            tmpstr += ', parameters={}'.format(params)
        tmpstr += '\n'

    tmpstr = tmpstr + ')'
    return tmpstr
# Test
import torchvision.models as models
model = models.alexnet()
print(torch_summarize(model))
# # Output
# AlexNet (
#   (features): Sequential (
#     (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2)), weights=((64, 3, 11, 11), (64,)), parameters=23296
#     (1): ReLU (inplace), weights=(), parameters=0
#     (2): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#     (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2)), weights=((192, 64, 5, 5), (192,)), parameters=307392
#     (4): ReLU (inplace), weights=(), parameters=0
#     (5): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#     (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((384, 192, 3, 3), (384,)), parameters=663936
#     (7): ReLU (inplace), weights=(), parameters=0
#     (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((256, 384, 3, 3), (256,)), parameters=884992
#     (9): ReLU (inplace), weights=(), parameters=0
#     (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)), weights=((256, 256, 3, 3), (256,)), parameters=590080
#     (11): ReLU (inplace), weights=(), parameters=0
#     (12): MaxPool2d (size=(3, 3), stride=(2, 2), dilation=(1, 1)), weights=(), parameters=0
#   ), weights=((64, 3, 11, 11), (64,), (192, 64, 5, 5), (192,), (384, 192, 3, 3), (384,), (256, 384, 3, 3), (256,), (256, 256, 3, 3), (256,)), parameters=2469696
#   (classifier): Sequential (
#     (0): Dropout (p = 0.5), weights=(), parameters=0
#     (1): Linear (9216 -> 4096), weights=((4096, 9216), (4096,)), parameters=37752832
#     (2): ReLU (inplace), weights=(), parameters=0
#     (3): Dropout (p = 0.5), weights=(), parameters=0
#     (4): Linear (4096 -> 4096), weights=((4096, 4096), (4096,)), parameters=16781312
#     (5): ReLU (inplace), weights=(), parameters=0
#     (6): Linear (4096 -> 1000), weights=((1000, 4096), (1000,)), parameters=4097000
#   ), weights=((4096, 9216), (4096,), (4096, 4096), (4096,), (1000, 4096), (1000,)), parameters=58631144
# )
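If you only need the parameter counts rather than a per-layer table, a minimal sketch using just the standard PyTorch API (numel() and requires_grad):
import torchvision.models as models

model = models.alexnet()
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total params:', total)          # 61100840 for AlexNet, matching the summary above
print('Trainable params:', trainable)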
Edit: isaykatsman has a PyTorch PR to add a model.summary() that is exactly like the Keras one: https://github.com/pytorch/pytorch/pull/3043/files
AFAIK there is no model.summary()-like equivalent in PyTorch.
Meanwhile, you can refer to the script by szagoruyko, which gives a nice visualization, as in the resnet18 example (see also the sketch below).
Cheers
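Short of a full summary, you can still list every parameter's name and shape with the standard PyTorch API; a minimal sketch:
import torchvision.models as models

model = models.alexnet()
for name, param in model.named_parameters():
    print(name, tuple(param.shape))
# features.0.weight (64, 3, 11, 11)
# features.0.bias (64,)
# ...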
You can just use x.shape to inspect the dimensions of a tensor x.
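For example, a minimal sketch with a made-up tensor:
import torch

x = torch.zeros(350, 6556)
print(x.shape)    # torch.Size([350, 6556])
print(x.size(0))  # 350, the batch dimension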
How does it work if I put the following inputs with the following shapes into my model? 350 is the batch size and 850 is just an integer.
input_a: torch.Size([350, 6556])
input_b: torch.Size([350, 6556])
input_c: torch.Size([350])
input_d: torch.Size([20, 6556])
input_e: 850
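A minimal sketch of how x.shape applies to those inputs, assuming they are held in a dict with these hypothetical names (note that input_e is a plain int and has no .shape):
import torch

inputs = {
    'input_a': torch.zeros(350, 6556),
    'input_b': torch.zeros(350, 6556),
    'input_c': torch.zeros(350),
    'input_d': torch.zeros(20, 6556),
    'input_e': 850,  # plain integer, not a tensor
}
for name, value in inputs.items():
    print(name, value.shape if torch.is_tensor(value) else value)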