DenseNet gist, excerpted from https://github.com/gpleiss/efficient_densenet_pytorch/blob/master/models/densenet.py
from collections import OrderedDict

import torch.nn as nn
import torch.nn.functional as F

# _DenseBlock and _Transition are defined in the file this gist is excerpted
# from (see the URL above); they implement the bottleneck dense layers and the
# BN-ReLU-conv-pool transition layers, respectively.


class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 3 or 4 ints) - how many layers in each pooling block
        compression (float) - factor by which transition layers reduce the
            number of features (`theta` in paper)
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for the number of bottleneck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        small_inputs (bool) - set to True for 32x32 inputs (e.g. CIFAR),
            False for ImageNet-sized inputs
    """
    def __init__(self, growth_rate=12, block_config=(16, 16, 16), compression=0.5,
                 num_init_features=24, bn_size=4, drop_rate=0,
                 num_classes=10, small_inputs=True):
        super(DenseNet, self).__init__()
        assert 0 < compression <= 1, 'compression of densenet should be between 0 and 1'
        self.avgpool_size = 8 if small_inputs else 7

        # First convolution: a single 3x3 conv for small inputs, or the
        # ImageNet-style 7x7 conv plus max pool otherwise
        if small_inputs:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)),
            ]))
        else:
            self.features = nn.Sequential(OrderedDict([
                ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ]))
            self.features.add_module('norm0', nn.BatchNorm2d(num_init_features))
            self.features.add_module('relu0', nn.ReLU(inplace=True))
            self.features.add_module('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1,
                                                           ceil_mode=False))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate
            )
            self.features.add_module('denseblock%d' % (i + 1), block)
            # Each dense layer concatenates growth_rate new feature maps
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                # Transition layers compress the channel count between blocks
                trans = _Transition(num_input_features=num_features,
                                    num_output_features=int(num_features * compression))
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = int(num_features * compression)

        # Final batch norm
        self.features.add_module('norm_final', nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=self.avgpool_size).view(features.size(0), -1)
        out = self.classifier(out)
        return out
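
A minimal usage sketch, assuming `_DenseBlock` and `_Transition` from the linked densenet.py are in scope. With the defaults, channels grow 24 → 216 → 108 → 300 → 150 → 342 across the three dense blocks and two transitions, so the classifier ends up as Linear(342, 10).

import torch

# Default Densenet-BC (k=12, three blocks of 16 layers) for CIFAR-10
model = DenseNet(growth_rate=12, block_config=(16, 16, 16), num_classes=10)

# Channel bookkeeping with the defaults:
#    24 + 16*12 = 216  -> transition (x0.5) -> 108
#   108 + 16*12 = 300  -> transition (x0.5) -> 150
#   150 + 16*12 = 342  -> final norm, then Linear(342, 10)

x = torch.randn(4, 3, 32, 32)  # a CIFAR-sized dummy batch
logits = model(x)
print(logits.shape)  # torch.Size([4, 10])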