Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/layers/convolutional/conv2d.py: 93%
14 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Keras 2D convolution layer."""
18from keras.src import activations
19from keras.src import constraints
20from keras.src import initializers
21from keras.src import regularizers
22from keras.src.dtensor import utils
23from keras.src.layers.convolutional.base_conv import Conv
25# isort: off
26from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Conv2D", "keras.layers.Convolution2D")
class Conv2D(Conv):
    """2D convolution layer (e.g. spatial convolution over images).

    This layer creates a convolution kernel that is convolved with the layer
    input to produce a tensor of outputs. If `use_bias` is True, a bias
    vector is created and added to the outputs. Finally, if `activation` is
    not `None`, it is applied to the outputs as well.

    When using this layer as the first layer in a model, provide the keyword
    argument `input_shape` (tuple of integers or `None`, not including the
    sample axis), e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
    in `data_format="channels_last"`. `None` may be used for a dimension of
    variable size.

    Examples:

    >>> # The inputs are 28x28 RGB images with `channels_last` and the batch
    >>> # size is 4.
    >>> input_shape = (4, 28, 28, 3)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv2D(
    ...     2, 3, activation='relu', input_shape=input_shape[1:])(x)
    >>> print(y.shape)
    (4, 26, 26, 2)

    >>> # With `dilation_rate` as 2.
    >>> input_shape = (4, 28, 28, 3)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv2D(
    ...     2, 3,
    ...     activation='relu',
    ...     dilation_rate=2,
    ...     input_shape=input_shape[1:])(x)
    >>> print(y.shape)
    (4, 24, 24, 2)

    >>> # With `padding` as "same".
    >>> input_shape = (4, 28, 28, 3)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv2D(
    ...     2, 3, activation='relu', padding="same",
    ...     input_shape=input_shape[1:])(x)
    >>> print(y.shape)
    (4, 28, 28, 2)

    >>> # With extended batch shape [4, 7]:
    >>> input_shape = (4, 7, 28, 28, 3)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv2D(
    ...     2, 3, activation='relu', input_shape=input_shape[2:])(x)
    >>> print(y.shape)
    (4, 7, 26, 26, 2)

    Args:
      filters: Integer, the dimensionality of the output space (i.e. the
        number of output filters in the convolution).
      kernel_size: An integer or tuple/list of 2 integers, specifying the
        height and width of the 2D convolution window. Can be a single
        integer to specify the same value for all spatial dimensions.
      strides: An integer or tuple/list of 2 integers, specifying the strides
        of the convolution along the height and width. Can be a single
        integer to specify the same value for all spatial dimensions.
        Specifying any stride value != 1 is incompatible with specifying any
        `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"`
        means no padding. `"same"` results in padding with zeros evenly to
        the left/right or up/down of the input. When `padding="same"` and
        `strides=1`, the output has the same size as the input.
      data_format: A string, one of `channels_last` (default) or
        `channels_first`. The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape `(batch_size,
        height, width, channels)` while `channels_first` corresponds to
        inputs with shape `(batch_size, channels, height, width)`. It
        defaults to the `image_data_format` value found in your Keras config
        file at `~/.keras/keras.json`. If you never set it, then it will be
        `channels_last`. Note that the `channels_first` format is currently
        not supported by TensorFlow on CPU.
      dilation_rate: an integer or tuple/list of 2 integers, specifying the
        dilation rate to use for dilated convolution. Can be a single integer
        to specify the same value for all spatial dimensions. Currently,
        specifying any `dilation_rate` value != 1 is incompatible with
        specifying any stride value != 1.
      groups: A positive integer specifying the number of groups in which the
        input is split along the channel axis. Each group is convolved
        separately with `filters / groups` filters. The output is the
        concatenation of all the `groups` results along the channel axis.
        Input channels and `filters` must both be divisible by `groups`.
      activation: Activation function to use. If you don't specify anything,
        no activation is applied (see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix (see
        `keras.initializers`). Defaults to 'glorot_uniform'.
      bias_initializer: Initializer for the bias vector (see
        `keras.initializers`). Defaults to 'zeros'.
      kernel_regularizer: Regularizer function applied to the `kernel`
        weights matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector (see
        `keras.regularizers`).
      activity_regularizer: Regularizer function applied to the output of the
        layer (its "activation") (see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix
        (see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector (see
        `keras.constraints`).

    Input shape:
      4+D tensor with shape: `batch_shape + (channels, rows, cols)` if
      `data_format='channels_first'` or 4+D tensor with shape:
      `batch_shape + (rows, cols, channels)` if
      `data_format='channels_last'`.

    Output shape:
      4+D tensor with shape: `batch_shape + (filters, new_rows, new_cols)` if
      `data_format='channels_first'` or 4+D tensor with shape:
      `batch_shape + (new_rows, new_cols, filters)` if
      `data_format='channels_last'`. `rows` and `cols` values might have
      changed due to padding.

    Returns:
      A tensor of rank 4+ representing
      `activation(conv2d(inputs, kernel) + bias)`.

    Raises:
      ValueError: if `padding` is `"causal"`.
      ValueError: when both `strides > 1` and `dilation_rate > 1`.
    """

    @utils.allow_initializer_layout
    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        # Deserialize the string/dict-identified sub-objects into concrete
        # callables/objects up front; everything else is forwarded verbatim
        # to the rank-generic base class.
        resolved = dict(
            activation=activations.get(activation),
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
        )
        super().__init__(
            # `rank=2` is the only thing that distinguishes this layer from
            # its siblings (Conv1D/Conv3D): two spatial dimensions.
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            use_bias=use_bias,
            **resolved,
            **kwargs
        )
# Alias: `Convolution2D` is registered as a second serialization/export name
# (see the `keras_export` decorator on the class) for backward compatibility.
Convolution2D = Conv2D