Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/layers/convolutional/separable_conv1d.py: 39% (36 statements)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras depthwise separable 1D convolution."""


import tensorflow.compat.v2 as tf

from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import regularizers
from keras.src.layers.convolutional.base_separable_conv import SeparableConv
from keras.src.utils import conv_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export(
    "keras.layers.SeparableConv1D", "keras.layers.SeparableConvolution1D"
)
class SeparableConv1D(SeparableConv):
    """Depthwise separable 1D convolution.

    This layer performs a depthwise convolution that acts separately on
    channels, followed by a pointwise convolution that mixes channels.
    If `use_bias` is True and a bias initializer is provided,
    it adds a bias vector to the output.
    It then optionally applies an activation function to produce the final
    output.

    Args:
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: A single integer specifying the spatial
        dimensions of the filters.
      strides: A single integer specifying the strides
        of the convolution.
        Specifying any `stride` value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
        `"valid"` means no padding. `"same"` results in padding with zeros
        evenly to the left/right or up/down of the input such that output has
        the same height/width dimension as the input. `"causal"` results in
        causal (dilated) convolutions, e.g. `output[t]` does not depend on
        `input[t+1:]`.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, length, channels)` while `channels_first` corresponds to
        inputs with shape `(batch_size, channels, length)`.
      dilation_rate: A single integer, specifying
        the dilation rate to use for dilated convolution.
      depth_multiplier: The number of depthwise convolution output channels for
        each input channel. The total number of depthwise convolution output
        channels will be equal to `num_filters_in * depth_multiplier`.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied
        (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias.
      depthwise_initializer: An initializer for the depthwise convolution kernel
        (see `keras.initializers`). If None, then the default initializer
        ('glorot_uniform') will be used.
      pointwise_initializer: An initializer for the pointwise convolution kernel
        (see `keras.initializers`). If None, then the default initializer
        ('glorot_uniform') will be used.
      bias_initializer: An initializer for the bias vector. If None, the default
        initializer ('zeros') will be used (see `keras.initializers`).
      depthwise_regularizer: Optional regularizer for the depthwise
        convolution kernel (see `keras.regularizers`).
      pointwise_regularizer: Optional regularizer for the pointwise
        convolution kernel (see `keras.regularizers`).
      bias_regularizer: Optional regularizer for the bias vector
        (see `keras.regularizers`).
      activity_regularizer: Optional regularizer function for the output
        (see `keras.regularizers`).
      depthwise_constraint: Optional projection function to be applied to the
        depthwise kernel after being updated by an `Optimizer` (e.g. used for
        norm constraints or value constraints for layer weights). The function
        must take as input the unprojected variable and must return the
        projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training
        (see `keras.constraints`).
      pointwise_constraint: Optional projection function to be applied to the
        pointwise kernel after being updated by an `Optimizer`
        (see `keras.constraints`).
      bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`
        (see `keras.constraints`).
      trainable: Boolean, if `True` the weights of this layer will be marked as
        trainable (and listed in `layer.trainable_weights`).

    Input shape:
      3D tensor with shape:
      `(batch_size, channels, steps)` if data_format='channels_first'
      or 3D tensor with shape:
      `(batch_size, steps, channels)` if data_format='channels_last'.

    Output shape:
      3D tensor with shape:
      `(batch_size, filters, new_steps)` if data_format='channels_first'
      or 3D tensor with shape:
      `(batch_size, new_steps, filters)` if data_format='channels_last'.
      `new_steps` value might have changed due to padding or strides.

    Returns:
      A tensor of rank 3 representing
      `activation(separableconv1d(inputs, kernel) + bias)`.
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            depth_multiplier=depth_multiplier,
            activation=activations.get(activation),
            use_bias=use_bias,
            depthwise_initializer=initializers.get(depthwise_initializer),
            pointwise_initializer=initializers.get(pointwise_initializer),
            bias_initializer=initializers.get(bias_initializer),
            depthwise_regularizer=regularizers.get(depthwise_regularizer),
            pointwise_regularizer=regularizers.get(pointwise_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            depthwise_constraint=constraints.get(depthwise_constraint),
            pointwise_constraint=constraints.get(pointwise_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )

    def call(self, inputs):
        if self.padding == "causal":
            inputs = tf.pad(inputs, self._compute_causal_padding(inputs))
        if self.data_format == "channels_last":
            strides = (1,) + self.strides * 2 + (1,)
            spatial_start_dim = 1
        else:
            strides = (1, 1) + self.strides * 2
            spatial_start_dim = 2

        # Explicitly broadcast inputs and kernels to 4D.
        # TODO(fchollet): refactor when a native separable_conv1d op is
        # available.
        inputs = tf.expand_dims(inputs, spatial_start_dim)
        depthwise_kernel = tf.expand_dims(self.depthwise_kernel, 0)
        pointwise_kernel = tf.expand_dims(self.pointwise_kernel, 0)
        dilation_rate = (1,) + self.dilation_rate

        if self.padding == "causal":
            op_padding = "valid"
        else:
            op_padding = self.padding
        outputs = tf.compat.v1.nn.separable_conv2d(
            inputs,
            depthwise_kernel,
            pointwise_kernel,
            strides=strides,
            padding=op_padding.upper(),
            rate=dilation_rate,
            data_format=conv_utils.convert_data_format(
                self.data_format, ndim=4
            ),
        )

        if self.use_bias:
            outputs = tf.nn.bias_add(
                outputs,
                self.bias,
                data_format=conv_utils.convert_data_format(
                    self.data_format, ndim=4
                ),
            )

        outputs = tf.squeeze(outputs, [spatial_start_dim])

        if self.activation is not None:
            return self.activation(outputs)
        return outputs
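

# A minimal sketch (not part of the original module) of the expand-to-4D trick
# used in `SeparableConv1D.call` above, written as a standalone helper. It
# assumes `channels_last` data, unit strides and dilation, "VALID"/"SAME"
# padding, and the layer's own 3D kernel shapes; the name
# `_separable_conv1d_via_2d` is hypothetical.
def _separable_conv1d_via_2d(
    inputs, depthwise_kernel, pointwise_kernel, padding="VALID"
):
    """Run a 1D separable convolution through `tf.nn.separable_conv2d`.

    Args:
        inputs: float tensor of shape `(batch, steps, channels)`.
        depthwise_kernel: tensor of shape
            `(kernel_size, channels, depth_multiplier)`.
        pointwise_kernel: tensor of shape
            `(1, channels * depth_multiplier, filters)`.
        padding: `"VALID"` or `"SAME"`.

    Returns:
        A tensor of shape `(batch, new_steps, filters)`.
    """
    # Add a dummy height dimension so the 2D op can be reused.
    x = tf.expand_dims(inputs, 1)  # (batch, 1, steps, channels)
    dw = tf.expand_dims(depthwise_kernel, 0)  # (1, k, channels, multiplier)
    pw = tf.expand_dims(pointwise_kernel, 0)  # (1, 1, channels * mult, filters)
    outputs = tf.nn.separable_conv2d(
        x, dw, pw, strides=[1, 1, 1, 1], padding=padding
    )
    # Drop the dummy height dimension again.
    return tf.squeeze(outputs, [1])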
# Alias

SeparableConvolution1D = SeparableConv1D
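

# A minimal usage sketch (illustrative, not part of the original file): build
# the layer on random data and check the shapes documented in the class
# docstring. The filter count, kernel size, and input shape are arbitrary.
if __name__ == "__main__":
    layer = SeparableConv1D(
        filters=32, kernel_size=3, padding="same", activation="relu"
    )
    # (batch_size, steps, channels) for the default `channels_last` format.
    x = tf.random.normal((4, 10, 8))
    y = layer(x)  # first call builds the depthwise/pointwise kernels
    print(y.shape)  # (4, 10, 32): `same` padding keeps steps=10, filters=32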