model.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#####################################
# https://arxiv.org/abs/1302.4389 #
#####################################
# Library modules
import json
# External library modules
import torch
# local library modules
from maxout import MaxoutMLP
from maxout import MaxoutConv
from utils import init_hyper_params
from utils import num_corrects
from utils import device
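
# The hyper-parameter dictionary returned by init_hyper_params is accessed via
# the keys sketched below (structure inferred from how the keys are used in
# this file; the actual values live in utils):
#
#     {
#         "norm_constraint": ...,
#         "mlp":     [{"layers": ..., "neurons": ...}, ...],
#         "conv":    [{"channels": ..., "kernel": ...}, ...],
#         "padding": [...],
#         "pool":    [...],
#         "stride":  [...],
#         "linear":  {"in_channels": ..., "out_channels": ...},
#     }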


class MaxoutMLPMNIST(torch.nn.Module):
    """MLP + dropout"""

    def __init__(self, input_dim=784):
        """
        Define maxout layers to train the MNIST dataset.

        :param input_dim: Input dimension to the model.
                          For MNIST the images are :py:const:`28` x :py:const:`28`;
                          for this model each `2d` image is reshaped into a
                          `1d` vector of pixels.
        :type input_dim: :py:obj:`int`
        """
        super(MaxoutMLPMNIST, self).__init__()

        # parameters initialization
        self.hparams = init_hyper_params()

        # dummy tensors for upper bound after norm
        self.norm_upper1 = torch.empty(self.hparams['mlp'][0]['neurons']).\
            fill_(self.hparams['norm_constraint']).to(device)
        self.norm_upper2 = torch.empty(self.hparams['mlp'][1]['neurons']).\
            fill_(self.hparams['norm_constraint']).to(device)

        # Maxout Layer 1 (input_size, num_layers, num_neurons)
        self.maxout1 = MaxoutMLP(input_dim,
                                 self.hparams['mlp'][0]['layers'],
                                 self.hparams['mlp'][0]['neurons']).to(device)

        # Maxout Layer 2 (input_size, num_layers, num_neurons)
        self.maxout2 = MaxoutMLP(self.hparams['mlp'][0]['neurons'],
                                 self.hparams['mlp'][1]['layers'],
                                 self.hparams['mlp'][1]['neurons']).to(device)

    def forward(self, input_imgs, is_train=True):
        """
        Forward the inputs through the maxout network.

        The norms and their constraint are only applied for the MNIST
        dataset, as described in the paper.

        :param input_imgs: Input images to the model
        :type input_imgs: :py:class:`torch.Tensor`
        :param is_train: Whether the input is for training
                         (dropout is applied only during training).
        :type is_train: :py:obj:`bool`
        """
        dropout = torch.nn.Dropout(p=0.2).to(device)

        # Maxout layer 1 + dropout
        mx1_out = self.maxout1(input_imgs, is_norm=True,
                               norm_constraint=self.hparams['norm_constraint'],
                               norm_upper=self.norm_upper1)
        if is_train:
            mx1_out = dropout(mx1_out)

        # Maxout layer 2
        mx2_out = self.maxout2(mx1_out, is_norm=True,
                               norm_constraint=self.hparams['norm_constraint'],
                               norm_upper=self.norm_upper2)

        # Softmax over the class dimension
        softmax = torch.nn.Softmax(dim=1)
        return softmax(mx2_out)
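
# Example usage (a minimal sketch, not part of the original module; it assumes
# MNIST images flattened to float tensors of shape (batch, 784), matching the
# default input_dim above, with num_classes set by the 'mlp' hyper-parameters):
#
#     model = MaxoutMLPMNIST().to(device)
#     imgs = torch.rand(64, 784).to(device)     # hypothetical batch of 64 images
#     probs = model(imgs, is_train=False)       # (64, num_classes) probabilities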


class MaxoutConvMNIST(torch.nn.Module):
    """Convolutional maxout network: maxout + maxpool blocks followed by a linear layer."""

    def __init__(self, in_channels=1):
        """
        Define the Maxout, Maxpool and Linear layers of the model.

        :param in_channels: Number of channels of the input image
        :type in_channels: :py:obj:`int`
        """
        super(MaxoutConvMNIST, self).__init__()

        # parameters initialization
        self.hparams = init_hyper_params()

        # Maxout Layer 1 (in_channels, out_channels, kernel)
        self.maxout1 = MaxoutConv(in_channels=in_channels,
                                  out_channels=self.hparams['conv'][0]['channels'],
                                  kernel_size=self.hparams['conv'][0]['kernel'],
                                  padding=self.hparams['padding'][0]).to(device)
        self.maxpool1 = torch.nn.MaxPool2d(self.hparams['pool'][0],
                                           self.hparams['stride'][0])

        # Maxout Layer 2 (in_channels, out_channels, kernel)
        self.maxout2 = MaxoutConv(in_channels=1,
                                  out_channels=self.hparams['conv'][1]['channels'],
                                  kernel_size=self.hparams['conv'][1]['kernel'],
                                  padding=self.hparams['padding'][1]).to(device)
        self.maxpool2 = torch.nn.MaxPool2d(self.hparams['pool'][1],
                                           self.hparams['stride'][1])

        # Maxout Layer 3 (in_channels, out_channels, kernel)
        self.maxout3 = MaxoutConv(in_channels=1,
                                  out_channels=self.hparams['conv'][2]['channels'],
                                  kernel_size=self.hparams['conv'][2]['kernel'],
                                  padding=self.hparams['padding'][2]).to(device)
        self.maxpool3 = torch.nn.MaxPool2d(self.hparams['pool'][2],
                                           self.hparams['stride'][2])

        self.linear = torch.nn.Linear(self.hparams['linear']['in_channels'],
                                      self.hparams['linear']['out_channels'])

    def forward(self, _input, is_train=True):
        """
        Pass the input through the whole model.

        :param _input: input image
        :type _input: :py:class:`torch.Tensor`
        :param is_train: Unused here; kept for interface parity with
                         :py:class:`MaxoutMLPMNIST`.
        :type is_train: :py:obj:`bool`
        """
        # Maxout1 + Maxpool1
        out = self.maxout1(_input, is_norm=True)
        out = self.maxpool1(out)

        # Maxout2 + Maxpool2
        out = self.maxout2(out, is_norm=True)
        out = self.maxpool2(out)

        # Maxout3 + Maxpool3
        out = self.maxout3(out, is_norm=True)
        out = self.maxpool3(out)

        # Flatten the feature maps before the linear layer
        out = out.view(out.shape[0], -1)

        # linear + softmax over the class dimension
        out = self.linear(out)
        return torch.nn.Softmax(dim=1)(out)
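

# Quick smoke test (a minimal sketch, not part of the original module). It
# assumes the hyper-parameters returned by utils.init_hyper_params are
# configured for single-channel 28x28 MNIST inputs, so that the flattened
# feature map matches the final linear layer.
if __name__ == "__main__":
    # Hypothetical batch of 8 random "images", shaped (batch, channels, H, W)
    fake_imgs = torch.rand(8, 1, 28, 28).to(device)

    model = MaxoutConvMNIST().to(device)
    probs = model(fake_imgs, is_train=False)

    # Each row of the output should be a probability distribution over classes
    print(probs.shape, probs.sum(dim=1))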