
Commit df3babb

Use Lasagne's new TransposedConv2DLayer instead of mine

1 parent fd9d6c2
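The commit swaps the hand-rolled deconvolution layer (asdDeconvLayer, deleted below) for Lasagne's built-in TransposedConv2DLayer. A minimal sketch of the new call, with made-up shapes and filter sizes that are not taken from the commit:

import lasagne
from lasagne.layers import InputLayer
from lasagne.layers.conv import TransposedConv2DLayer

# Hypothetical bc01 input: batch, channels, rows, cols.
l_in = InputLayer((None, 64, 32, 32))
# 'crop' is the keyword that replaces the old layer's 'pad'.
l_up = TransposedConv2DLayer(l_in, num_filters=32, filter_size=4,
                             stride=2, crop=0)
print(l_up.output_shape)  # (None, 32, 66, 66): (32 - 1) * 2 + 4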

File tree

layers.py: 6 additions, 105 deletions
@@ -2,9 +2,8 @@
 
 import numpy as np
 import lasagne
-from lasagne.layers import get_all_layers, get_output, get_output_shape
-from lasagne.layers.conv import TransposeConv2DLayer as DeconvLayer
-from theano.sandbox.cuda.basic_ops import gpu_contiguous
+from lasagne.layers import get_output, get_output_shape
+from lasagne.layers.conv import TransposedConv2DLayer
 import theano.tensor as T
 
 from padded import DynamicPaddingLayer, PaddedConv2DLayer as ConvLayer
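As an aside, not part of the diff: Lasagne also re-exports the layer at package level, so the imports below would work equally well; Deconv2DLayer is the documented alias.

from lasagne.layers import TransposedConv2DLayer
from lasagne.layers import Deconv2DLayer  # same class, documented alias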
@@ -328,7 +327,7 @@ def __init__(self,
         l_upsampling = l_renet
         for l in range(nlayers):
             target_shape = target_shape * up_ratio
-            l_upsampling = DeconvLayer(
+            l_upsampling = TransposedConv2DLayer(
                 l_upsampling,
                 num_filters=out_nfilters[l],
                 filter_size=filter_size,
@@ -346,7 +345,7 @@ def __init__(self,
                 out_nfilters[l], out_shape))
 
         # CROP
-        # pad in DeconvLayer cannot be a tensor --> we cannot
+        # crop in TransposedConv2DLayer cannot be a tensor --> we cannot
         # crop unless we know in advance by how much!
         crop = T.max(T.stack([up_shape - target_shape, T.zeros(2)]),
                      axis=0)
@@ -364,12 +363,12 @@ def __init__(self,
         l_upsampling = l_renet
         for i, (nf, f_size, stride) in enumerate(zip(
                 out_nfilters, out_filters_size, out_filters_stride)):
-            l_upsampling = DeconvLayer(
+            l_upsampling = TransposedConv2DLayer(
                 l_upsampling,
                 num_filters=nf,
                 filter_size=f_size,
                 stride=stride,
-                pad=0,
+                crop=0,
                 W=out_W_init,
                 b=out_b_init,
                 nonlinearity=out_nonlinearity)
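This second call site shows the keyword rename: TransposedConv2DLayer crops rows and columns from the full upsampled output rather than padding the input, so the argument is crop, and crop=0 reproduces the old pad=0 behaviour. A toy shape check, with illustrative sizes only:

from lasagne.layers import InputLayer
from lasagne.layers.conv import TransposedConv2DLayer

l_in = InputLayer((None, 8, 16, 16))  # hypothetical bc01 shape
for crop in (0, 1):
    l = TransposedConv2DLayer(l_in, num_filters=8, filter_size=3,
                              stride=2, crop=crop)
    # out = (in - 1) * stride + filter_size - 2 * crop
    print(crop, l.output_shape)  # 0 -> (None, 8, 33, 33); 1 -> (None, 8, 31, 31)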
@@ -988,104 +987,6 @@ def get_output_shape_for(self, input_shape):
                 self.nclasses)
 
 
-class asdDeconvLayer(lasagne.layers.Layer):
-    """An upsampling Layer that transposes a convolution
-
-    This layer upsamples its input using the transpose of a convolution,
-    also known as fractional convolution in some contexts.
-
-    Notes
-    -----
-    Expects the input to be in format: batchsize, channels, rows, cols
-    """
-    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
-                 untie_biases=False, W=lasagne.init.GlorotUniform(),
-                 b=lasagne.init.Constant(0.), nonlinearity=None,
-                 flip_filters=False, **kwargs):
-        super(asdDeconvLayer, self).__init__(incoming, **kwargs)
-        self.num_filters = num_filters
-        self.filter_size = lasagne.utils.as_tuple(filter_size, 2, int)
-        self.stride = lasagne.utils.as_tuple(stride, 2, int)
-        self.pad = lasagne.utils.as_tuple(pad, 2, int)
-        self.untie_biases = untie_biases
-        self.flip_filters = flip_filters
-        W_shape = (self.input_shape[1], num_filters) + self.filter_size
-        self.W_shape = W_shape
-        self.W = self.add_param(W, W_shape, name="W")
-        if b is None:
-            self.b = None
-        else:
-            if self.untie_biases:
-                biases_shape = (num_filters, self.output_shape[2],
-                                self.output_shape[3])
-            else:
-                biases_shape = (num_filters,)
-            self.b = self.add_param(b, biases_shape, name="b",
-                                    regularizable=False)
-
-        if nonlinearity is None:
-            nonlinearity = lasagne.nonlinearities.identity
-        self.nonlinearity = nonlinearity
-
-    def get_output_shape_for(self, input_shape):
-        batch_size = input_shape[0]
-        pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * 2
-
-        output_rows = get_deconv_size(input_shape[2],
-                                      self.filter_size[0],
-                                      self.stride[0],
-                                      pad[0])
-
-        output_columns = get_deconv_size(input_shape[3],
-                                         self.filter_size[1],
-                                         self.stride[1],
-                                         pad[1])
-
-        return (batch_size, self.num_filters, output_rows, output_columns)
-
-    def get_output_for(self, input_arr, **kwargs):
-        filters = gpu_contiguous(self.W)
-        input_arr = gpu_contiguous(input_arr)
-        in_shape = get_output(self.input_layer).shape
-        out_shape = get_deconv_size(in_shape[2:], self.filter_size,
-                                    self.stride, self.pad)
-
-        op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(
-            imshp=(None,) * 4,
-            kshp=self.W_shape,
-            border_mode=self.pad,
-            subsample=self.stride,
-            filter_flip=self.flip_filters)
-        grad = op(filters, input_arr, out_shape)
-
-        if self.b is None:
-            activation = grad
-        elif self.untie_biases:
-            activation = grad + self.b.dimshuffle('x', 0, 1, 2)
-        else:
-            activation = grad + self.b.dimshuffle('x', 0, 'x', 'x')
-        return self.nonlinearity(activation)
-
-
-def get_deconv_size(input_size, filter_size, stride, pad):
-    if input_size is None:
-        return None
-    input_size = np.array(input_size)
-    filter_size = np.array(filter_size)
-    stride = np.array(stride)
-    if isinstance(pad, (int, Iterable)) and not isinstance(pad, str):
-        pad = np.array(pad)
-        output_size = (input_size - 1) * stride + filter_size - 2*pad
-
-    elif pad == 'full':
-        output_size = input_size * stride - filter_size - stride + 2
-    elif pad == 'valid':
-        output_size = (input_size - 1) * stride + filter_size
-    elif pad == 'same':
-        output_size = input_size
-    return output_size
-
-
 class CropLayer(lasagne.layers.Layer):
     def __init__(self, l_in, crop, data_format='bc01', centered=True,
                  **kwargs):
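The deleted get_deconv_size helper encoded the transposed-convolution output size; Lasagne now performs the same arithmetic internally. For integer padding the rule was output_size = (input_size - 1) * stride + filter_size - 2 * pad. A standalone re-derivation for checking shapes (plain NumPy, not the library code):

import numpy as np

def deconv_out_size(input_size, filter_size, stride, pad):
    # Mirrors the integer-pad branch of the deleted get_deconv_size().
    return ((np.array(input_size) - 1) * np.array(stride)
            + np.array(filter_size) - 2 * np.array(pad))

print(deconv_out_size(16, 3, 2, 0))  # 33, matching TransposedConv2DLayer with crop=0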
