Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/layers/convolutional/separable_conv2d.py: 58% (24 statements), coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras depthwise separable 2D convolution."""

import tensorflow.compat.v2 as tf

from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.layers.convolutional.base_separable_conv import SeparableConv
from keras.src.utils import conv_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export(
    "keras.layers.SeparableConv2D", "keras.layers.SeparableConvolution2D"
)
class SeparableConv2D(SeparableConv):
35 """Depthwise separable 2D convolution.
37 Separable convolutions consist of first performing
38 a depthwise spatial convolution
39 (which acts on each input channel separately)
40 followed by a pointwise convolution which mixes the resulting
41 output channels. The `depth_multiplier` argument controls how many
42 output channels are generated per input channel in the depthwise step.
44 Intuitively, separable convolutions can be understood as
45 a way to factorize a convolution kernel into two smaller kernels,
46 or as an extreme version of an Inception block.
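
    For example, with a 3x3 kernel, 32 input channels, `depth_multiplier=1`
    and `filters=64`, the depthwise step uses 3*3*32 = 288 kernel parameters
    and the pointwise step 1*1*32*64 = 2,048, roughly 2,300 in total, versus
    3*3*32*64 = 18,432 for an equivalent regular `Conv2D` (bias terms not
    counted).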

    Args:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window.
        Can be a single integer to specify the same value for
        all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
        specifying the strides of the convolution along the height and width.
        Can be a single integer to specify the same value for
        all spatial dimensions. Current implementation only supports equal
        length strides in the row and column dimensions.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding with zeros
        evenly to the left/right or up/down of the input such that output has
        the same height/width dimension as the input.
      data_format: A string,
        one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, height, width, channels)` while `channels_first`
        corresponds to inputs with shape
        `(batch_size, channels, height, width)`.
        When unspecified, uses `image_data_format` value found in your Keras
        config file at `~/.keras/keras.json` (if exists) else 'channels_last'.
        Defaults to 'channels_last'.
      dilation_rate: An integer or tuple/list of 2 integers, specifying
        the dilation rate to use for dilated convolution.
      depth_multiplier: The number of depthwise convolution output channels
        for each input channel.
        The total number of depthwise convolution output
        channels will be equal to `filters_in * depth_multiplier`.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied
        (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      depthwise_initializer: An initializer for the depthwise convolution kernel
        (see `keras.initializers`). If None, then the default initializer
        ('glorot_uniform') will be used.
      pointwise_initializer: An initializer for the pointwise convolution kernel
        (see `keras.initializers`). If None, then the default initializer
        ('glorot_uniform') will be used.
      bias_initializer: An initializer for the bias vector. If None, the default
        initializer ('zeros') will be used (see `keras.initializers`).
      depthwise_regularizer: Regularizer function applied to
        the depthwise kernel matrix (see `keras.regularizers`).
      pointwise_regularizer: Regularizer function applied to
        the pointwise kernel matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector
        (see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation")
        (see `keras.regularizers`).
      depthwise_constraint: Constraint function applied to
        the depthwise kernel matrix
        (see `keras.constraints`).
      pointwise_constraint: Constraint function applied to
        the pointwise kernel matrix
        (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector
        (see `keras.constraints`).

    Input shape:
      4D tensor with shape:
      `(batch_size, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(batch_size, rows, cols, channels)` if data_format='channels_last'.

    Output shape:
      4D tensor with shape:
      `(batch_size, filters, new_rows, new_cols)` if
      data_format='channels_first'
      or 4D tensor with shape:
      `(batch_size, new_rows, new_cols, filters)` if
      data_format='channels_last'. `rows` and `cols` values might have changed
      due to padding.

    Returns:
      A tensor of rank 4 representing
      `activation(separableconv2d(inputs, kernel) + bias)`.

    Raises:
      ValueError: if `padding` is "causal".
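
    Example (illustrative usage; the output shape below assumes the default
    `strides=(1, 1)` and `padding="valid"`):

    >>> x = tf.random.normal((4, 28, 28, 3))
    >>> layer = tf.keras.layers.SeparableConv2D(
    ...     filters=8, kernel_size=3, activation="relu"
    ... )
    >>> y = layer(x)
    >>> y.shape
    TensorShape([4, 26, 26, 8])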
133 """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        super().__init__(
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            depth_multiplier=depth_multiplier,
            activation=activations.get(activation),
            use_bias=use_bias,
            depthwise_initializer=initializers.get(depthwise_initializer),
            pointwise_initializer=initializers.get(pointwise_initializer),
            bias_initializer=initializers.get(bias_initializer),
            depthwise_regularizer=regularizers.get(depthwise_regularizer),
            pointwise_regularizer=regularizers.get(pointwise_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            depthwise_constraint=constraints.get(depthwise_constraint),
            pointwise_constraint=constraints.get(pointwise_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )

    def call(self, inputs):
        # Apply the actual ops.
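        # Build a rank-4 strides tuple matching the data format: NHWC
        # (`channels_last`) carries the spatial strides in positions 1-2,
        # NCHW (`channels_first`) in positions 2-3.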
        if self.data_format == "channels_last":
            strides = (1,) + self.strides + (1,)
        else:
            strides = (1, 1) + self.strides
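        # separable_conv2d applies the depthwise spatial convolution and then
        # the 1x1 pointwise convolution in a single call.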
        outputs = tf.compat.v1.nn.separable_conv2d(
            inputs,
            self.depthwise_kernel,
            self.pointwise_kernel,
            strides=strides,
            padding=self.padding.upper(),
            rate=self.dilation_rate,
            data_format=conv_utils.convert_data_format(
                self.data_format, ndim=4
            ),
        )
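
        # Add the bias along the channel axis implied by the data format.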
        if self.use_bias:
            outputs = tf.nn.bias_add(
                outputs,
                self.bias,
                data_format=conv_utils.convert_data_format(
                    self.data_format, ndim=4
                ),
            )

        if self.activation is not None:
            return self.activation(outputs)
        return outputs


# Alias
SeparableConvolution2D = SeparableConv2D