# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras 1D transposed convolution layer (sometimes called deconvolution)."""


import tensorflow.compat.v2 as tf

from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.dtensor import utils
from keras.src.engine.input_spec import InputSpec
from keras.src.layers.convolutional.conv1d import Conv1D
from keras.src.utils import conv_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export(
    "keras.layers.Conv1DTranspose", "keras.layers.Convolution1DTranspose"
)
class Conv1DTranspose(Conv1D):
    """Transposed convolution layer (sometimes called Deconvolution).

    The need for transposed convolutions generally arises
    from the desire to use a transformation going in the opposite direction
    of a normal convolution, i.e., from something that has the shape of the
    output of some convolution to something that has the shape of its input
    while maintaining a connectivity pattern that is compatible with
    said convolution.

    When using this layer as the first layer in a model,
    provide the keyword argument `input_shape`
    (tuple of integers or `None`, does not include the sample axis),
    e.g. `input_shape=(128, 3)` for data with 128 time steps and 3 channels.
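
    For example (illustrative usage; with `padding="same"` and the default
    `output_padding=None`, the output length is `steps * strides`):

    >>> x = tf.random.normal((2, 4, 8))
    >>> y = tf.keras.layers.Conv1DTranspose(
    ...     filters=16, kernel_size=3, strides=2, padding="same")(x)
    >>> y.shape
    TensorShape([2, 8, 16])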


    Args:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer specifying the length of the 1D convolution
        window.
      strides: An integer specifying the stride of the convolution along the
        time dimension. Specifying a stride value != 1 is incompatible with
        specifying a `dilation_rate` value != 1. Defaults to 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding with zeros
        evenly to the left/right of the input such that the output has the
        same length as the input.
      output_padding: An integer specifying the amount of padding along
        the time dimension of the output tensor.
        The amount of output padding must be lower than the stride.
        If set to `None` (default), the output shape is inferred.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, length, channels)` while `channels_first` corresponds to
        inputs with shape `(batch_size, channels, length)`.
      dilation_rate: An integer specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying a `dilation_rate` value != 1 is
        incompatible with specifying a stride value != 1.
        Dilation rates larger than 1 are not currently supported.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied
        (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix
        (see `keras.initializers`). Defaults to `'glorot_uniform'`.
      bias_initializer: Initializer for the bias vector
        (see `keras.initializers`). Defaults to `'zeros'`.
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector
        (see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation") (see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix
        (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector
        (see `keras.constraints`).

    Input shape:
      3D tensor with shape:
      `(batch_size, steps, channels)`

    Output shape:
      3D tensor with shape:
      `(batch_size, new_steps, filters)`
      If `output_padding` is specified:
      ```
      new_timesteps = ((timesteps - 1) * strides + kernel_size -
                       2 * padding + output_padding)
      ```
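
      For example, `timesteps=4`, `strides=2`, `kernel_size=3`, `padding=1`
      and `output_padding=1` give
      `new_timesteps = (4 - 1) * 2 + 3 - 2 * 1 + 1 = 8`.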


    Returns:
      A tensor of rank 3 representing
      `activation(conv1dtranspose(inputs, kernel) + bias)`.

    Raises:
      ValueError: if `padding` is "causal".
      ValueError: when both `strides` > 1 and `dilation_rate` > 1.

    References:
      - [A guide to convolution arithmetic for deep learning](
        https://arxiv.org/abs/1603.07285v1)
      - [Deconvolutional Networks](
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    """

    @utils.allow_initializer_layout
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        super().__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs,
        )

        self.output_padding = output_padding
        if self.output_padding is not None:
            self.output_padding = conv_utils.normalize_tuple(
                self.output_padding, 1, "output_padding", allow_zero=True
            )
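            # Each output_padding value must be strictly smaller than its
            # stride: a full extra stride of padding would produce a length
            # that corresponds to a longer input instead.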

            for stride, out_pad in zip(self.strides, self.output_padding):
                if out_pad >= stride:
                    raise ValueError(
                        "Strides must be greater than output padding. "
                        f"Received strides={self.strides}, "
                        f"output_padding={self.output_padding}."
                    )

    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if len(input_shape) != 3:
            raise ValueError(
                "Inputs should have rank 3. "
                f"Received input_shape={input_shape}."
            )
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                "The channel dimension of the inputs "
                "to `Conv1DTranspose` should be defined. "
                f"The input_shape received is {input_shape}, "
                f"where axis {channel_axis} (0-based) "
                "is the channel dimension, which was found to be `None`."
            )
        input_dim = int(input_shape[channel_axis])
        self.input_spec = InputSpec(ndim=3, axes={channel_axis: input_dim})
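        # Note: the transposed kernel stores output channels before input
        # channels, i.e. (kernel_size, filters, input_dim), which is the
        # layout `tf.nn.conv1d_transpose` expects (the reverse of `Conv1D`).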

        kernel_shape = self.kernel_size + (self.filters, input_dim)

        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        inputs_shape = tf.shape(inputs)
        batch_size = inputs_shape[0]
        if self.data_format == "channels_first":
            t_axis = 2
        else:
            t_axis = 1

        length = inputs_shape[t_axis]
        if self.output_padding is None:
            output_padding = None
        else:
            output_padding = self.output_padding[0]

        # Infer the dynamic output shape:
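        # `tf.nn.conv1d_transpose` needs the full output shape up front:
        # with stride > 1, several output lengths collapse to the same
        # convolution output length, so the target length must be supplied
        # rather than deduced from the inputs alone.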

        out_length = conv_utils.deconv_output_length(
            length,
            self.kernel_size[0],
            padding=self.padding,
            output_padding=output_padding,
            stride=self.strides[0],
            dilation=self.dilation_rate[0],
        )
        if self.data_format == "channels_first":
            output_shape = (batch_size, self.filters, out_length)
        else:
            output_shape = (batch_size, out_length, self.filters)
        data_format = conv_utils.convert_data_format(self.data_format, ndim=3)

        output_shape_tensor = tf.stack(output_shape)
        outputs = tf.nn.conv1d_transpose(
            inputs,
            self.kernel,
            output_shape_tensor,
            strides=self.strides,
            padding=self.padding.upper(),
            data_format=data_format,
            dilations=self.dilation_rate,
        )

        if not tf.executing_eagerly() and inputs.shape.rank:
            # Infer the static output shape:
            out_shape = self.compute_output_shape(inputs.shape)
            outputs.set_shape(out_shape)

        if self.use_bias:
            outputs = tf.nn.bias_add(
                outputs, self.bias, data_format=data_format
            )

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
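
    # Static analogue of the dynamic shape inference in `call`: applies the
    # same `deconv_output_length` formula to a statically known input shape.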

    def compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape).as_list()
        output_shape = list(input_shape)
        if self.data_format == "channels_first":
            c_axis, t_axis = 1, 2
        else:
            c_axis, t_axis = 2, 1

        if self.output_padding is None:
            output_padding = None
        else:
            output_padding = self.output_padding[0]
        output_shape[c_axis] = self.filters
        output_shape[t_axis] = conv_utils.deconv_output_length(
            output_shape[t_axis],
            self.kernel_size[0],
            padding=self.padding,
            output_padding=output_padding,
            stride=self.strides[0],
            dilation=self.dilation_rate[0],
        )
        return tf.TensorShape(output_shape)

    def get_config(self):
        config = super().get_config()
        config["output_padding"] = self.output_padding
        return config


# Alias

Convolution1DTranspose = Conv1DTranspose