# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""DenseNet models for Keras.

Reference:
  - [Densely Connected Convolutional Networks](
      https://arxiv.org/abs/1608.06993) (CVPR 2017)
"""

import tensorflow.compat.v2 as tf

from keras.src import backend
from keras.src.applications import imagenet_utils
from keras.src.engine import training
from keras.src.layers import VersionAwareLayers
from keras.src.utils import data_utils
from keras.src.utils import layer_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export

BASE_WEIGHTS_PATH = (
    "https://storage.googleapis.com/tensorflow/keras-applications/densenet/"
)
DENSENET121_WEIGHT_PATH = (
    BASE_WEIGHTS_PATH + "densenet121_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET121_WEIGHT_PATH_NO_TOP = (
    BASE_WEIGHTS_PATH
    + "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET169_WEIGHT_PATH = (
    BASE_WEIGHTS_PATH + "densenet169_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET169_WEIGHT_PATH_NO_TOP = (
    BASE_WEIGHTS_PATH
    + "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5"
)
DENSENET201_WEIGHT_PATH = (
    BASE_WEIGHTS_PATH + "densenet201_weights_tf_dim_ordering_tf_kernels.h5"
)
DENSENET201_WEIGHT_PATH_NO_TOP = (
    BASE_WEIGHTS_PATH
    + "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5"
)

layers = VersionAwareLayers()


def dense_block(x, blocks, name):
    """A dense block.

    Args:
        x: input tensor.
        blocks: integer, the number of building blocks.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    for i in range(blocks):
        x = conv_block(x, 32, name=name + "_block" + str(i + 1))
    return x


def transition_block(x, reduction, name):
    """A transition block.

    Args:
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_bn"
    )(x)
    x = layers.Activation("relu", name=name + "_relu")(x)
    x = layers.Conv2D(
        int(backend.int_shape(x)[bn_axis] * reduction),
        1,
        use_bias=False,
        name=name + "_conv",
    )(x)
    x = layers.AveragePooling2D(2, strides=2, name=name + "_pool")(x)
    return x


def conv_block(x, growth_rate, name):
    """A building block for a dense block.

    Args:
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.

    Returns:
        Output tensor for the block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
    x1 = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn"
    )(x)
    x1 = layers.Activation("relu", name=name + "_0_relu")(x1)
    x1 = layers.Conv2D(
        4 * growth_rate, 1, use_bias=False, name=name + "_1_conv"
    )(x1)
    x1 = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn"
    )(x1)
    x1 = layers.Activation("relu", name=name + "_1_relu")(x1)
    x1 = layers.Conv2D(
        growth_rate, 3, padding="same", use_bias=False, name=name + "_2_conv"
    )(x1)
    x = layers.Concatenate(axis=bn_axis, name=name + "_concat")([x, x1])
    return x
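
# Editor's note (not part of the original source): channel bookkeeping that
# follows from conv_block/transition_block above. Each conv_block concatenates
# growth_rate (=32) new channels onto its input, and each transition_block
# with reduction=0.5 halves the channel count. For DenseNet121
# (blocks = [6, 12, 24, 16], stem outputs 64 channels) that gives:
#   conv2: 64  + 6 * 32  = 256   -> pool2: 128
#   conv3: 128 + 12 * 32 = 512   -> pool3: 256
#   conv4: 256 + 24 * 32 = 1024  -> pool4: 512
#   conv5: 512 + 16 * 32 = 1024  (final feature depth before pooling)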


def DenseNet(
    blocks,
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates the DenseNet architecture.

    Reference:
    - [Densely Connected Convolutional Networks](
        https://arxiv.org/abs/1608.06993) (CVPR 2017)

    This function returns a Keras image classification model,
    optionally loaded with weights pre-trained on ImageNet.

    For image classification use cases, see
    [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

    For transfer learning use cases, make sure to read the
    [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

    Note: each Keras Application expects a specific kind of input preprocessing.
    For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
    inputs before passing them to the model.
    `densenet.preprocess_input` will scale pixels between 0 and 1 and then
    normalize each channel with respect to the ImageNet dataset statistics.

    Args:
        blocks: numbers of building blocks for the four dense layers.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `'channels_last'` data format)
            or `(3, 224, 224)` (with `'channels_first'` data format)).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 32.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        classifier_activation: A `str` or callable. The activation function to
            use on the "top" layer. Ignored unless `include_top=True`. Set
            `classifier_activation=None` to return the logits of the "top"
            layer. When loading pretrained weights, `classifier_activation`
            can only be `None` or `"softmax"`.

    Returns:
        A `keras.Model` instance.
    """
    if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
        raise ValueError(
            "The `weights` argument should be either "
            "`None` (random initialization), `imagenet` "
            "(pre-training on ImageNet), "
            "or the path to the weights file to be loaded."
        )

    if weights == "imagenet" and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            " as true, `classes` should be 1000"
        )

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights,
    )

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name="conv1/conv")(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name="conv1/bn"
    )(x)
    x = layers.Activation("relu", name="conv1/relu")(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name="pool1")(x)

    x = dense_block(x, blocks[0], name="conv2")
    x = transition_block(x, 0.5, name="pool2")
    x = dense_block(x, blocks[1], name="conv3")
    x = transition_block(x, 0.5, name="pool3")
    x = dense_block(x, blocks[2], name="conv4")
    x = transition_block(x, 0.5, name="pool4")
    x = dense_block(x, blocks[3], name="conv5")

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name="bn")(x)
    x = layers.Activation("relu", name="relu")(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name="avg_pool")(x)

        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(
            classes, activation=classifier_activation, name="predictions"
        )(x)
    else:
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    if blocks == [6, 12, 24, 16]:
        model = training.Model(inputs, x, name="densenet121")
    elif blocks == [6, 12, 32, 32]:
        model = training.Model(inputs, x, name="densenet169")
    elif blocks == [6, 12, 48, 32]:
        model = training.Model(inputs, x, name="densenet201")
    else:
        model = training.Model(inputs, x, name="densenet")

    # Load weights.
    if weights == "imagenet":
        if include_top:
            if blocks == [6, 12, 24, 16]:
                weights_path = data_utils.get_file(
                    "densenet121_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET121_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="9d60b8095a5708f2dcce2bca79d332c7",
                )
            elif blocks == [6, 12, 32, 32]:
                weights_path = data_utils.get_file(
                    "densenet169_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET169_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="d699b8f76981ab1b30698df4c175e90b",
                )
            elif blocks == [6, 12, 48, 32]:
                weights_path = data_utils.get_file(
                    "densenet201_weights_tf_dim_ordering_tf_kernels.h5",
                    DENSENET201_WEIGHT_PATH,
                    cache_subdir="models",
                    file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
                )
        else:
            if blocks == [6, 12, 24, 16]:
                weights_path = data_utils.get_file(
                    "densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET121_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="30ee3e1110167f948a6b9946edeeb738",
                )
            elif blocks == [6, 12, 32, 32]:
                weights_path = data_utils.get_file(
                    "densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET169_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="b8c4d4c20dd625c148057b9ff1c1176b",
                )
            elif blocks == [6, 12, 48, 32]:
                weights_path = data_utils.get_file(
                    "densenet201_weights_tf_dim_ordering_tf_kernels_notop.h5",
                    DENSENET201_WEIGHT_PATH_NO_TOP,
                    cache_subdir="models",
                    file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
                )
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
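
# Editor's note (not part of the original source): the depth in each model
# name counts trainable layers. Every conv_block has two convolutions, so a
# configuration `blocks` contributes 2 * sum(blocks) conv layers; adding the
# stem convolution, the three transition convolutions, and the final Dense
# classifier gives the usual names, e.g. for [6, 12, 24, 16]:
#   2 * (6 + 12 + 24 + 16) + 1 + 3 + 1 = 121  (DenseNet121)
# and likewise 169 for [6, 12, 32, 32] and 201 for [6, 12, 48, 32].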


@keras_export(
    "keras.applications.densenet.DenseNet121", "keras.applications.DenseNet121"
)
def DenseNet121(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates the DenseNet121 architecture."""
    return DenseNet(
        [6, 12, 24, 16],
        include_top,
        weights,
        input_tensor,
        input_shape,
        pooling,
        classes,
        classifier_activation,
    )


@keras_export(
    "keras.applications.densenet.DenseNet169", "keras.applications.DenseNet169"
)
def DenseNet169(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates the DenseNet169 architecture."""
    return DenseNet(
        [6, 12, 32, 32],
        include_top,
        weights,
        input_tensor,
        input_shape,
        pooling,
        classes,
        classifier_activation,
    )


@keras_export(
    "keras.applications.densenet.DenseNet201", "keras.applications.DenseNet201"
)
def DenseNet201(
    include_top=True,
    weights="imagenet",
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation="softmax",
):
    """Instantiates the DenseNet201 architecture."""
    return DenseNet(
        [6, 12, 48, 32],
        include_top,
        weights,
        input_tensor,
        input_shape,
        pooling,
        classes,
        classifier_activation,
    )


@keras_export("keras.applications.densenet.preprocess_input")
def preprocess_input(x, data_format=None):
    return imagenet_utils.preprocess_input(
        x, data_format=data_format, mode="torch"
    )
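
# Editor's note (not part of the original source): `mode="torch"` above is the
# preprocessing described in the DenseNet docstring: inputs are scaled to the
# [0, 1] range and each channel is then normalized with ImageNet statistics
# (commonly mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]); the
# exact values live in imagenet_utils and are quoted here only for reference.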


@keras_export("keras.applications.densenet.decode_predictions")
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)


preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode="",
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TORCH,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__

DOC = """

  Reference:
  - [Densely Connected Convolutional Networks](
      https://arxiv.org/abs/1608.06993) (CVPR 2017)

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Note: each Keras Application expects a specific kind of input preprocessing.
  For DenseNet, call `tf.keras.applications.densenet.preprocess_input` on your
  inputs before passing them to the model.

  Args:
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `'channels_last'` data format)
      or `(3, 224, 224)` (with `'channels_first'` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 32.
      E.g. `(200, 200, 3)` would be one valid value.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional block.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will
          be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
      When loading pretrained weights, `classifier_activation` can only
      be `None` or `"softmax"`.

  Returns:
    A Keras model instance.
"""

setattr(DenseNet121, "__doc__", DenseNet121.__doc__ + DOC)
setattr(DenseNet169, "__doc__", DenseNet169.__doc__ + DOC)
setattr(DenseNet201, "__doc__", DenseNet201.__doc__ + DOC)
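

# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original module. It
# exercises the documented public entry points (tf.keras.applications) end to
# end; "elephant.jpg" is a placeholder path and must exist locally, and the
# "imagenet" weights are downloaded on first use.
if __name__ == "__main__":
    import numpy as np
    import tensorflow as tf

    # Build DenseNet121 with the pretrained ImageNet classifier head.
    model = tf.keras.applications.DenseNet121(weights="imagenet")

    # Load an image at the default 224x224 resolution and add a batch axis.
    img = tf.keras.utils.load_img("elephant.jpg", target_size=(224, 224))
    x = np.expand_dims(tf.keras.utils.img_to_array(img), axis=0)

    # Scale to [0, 1] and normalize with ImageNet statistics ("torch" mode).
    x = tf.keras.applications.densenet.preprocess_input(x)

    # Predict and map class indices back to human-readable labels.
    preds = model.predict(x)
    print(tf.keras.applications.densenet.decode_predictions(preds, top=3)[0])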