# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=not-callable
# pylint: disable=redefined-builtin
"""Layers that can merge several inputs into one."""

from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export


class _Merge(Layer):
  """Generic merge layer for elementwise merge functions.

  Used to implement `Sum`, `Average`, etc.
  """

  def __init__(self, **kwargs):
    """Initializes a Merge layer.

    Args:
      **kwargs: standard layer keyword arguments.
    """
    super(_Merge, self).__init__(**kwargs)
    self.supports_masking = True

  def _merge_function(self, inputs):
    raise NotImplementedError

  def _compute_elemwise_op_output_shape(self, shape1, shape2):
    """Computes the shape of the resultant of an elementwise operation.

    Args:
      shape1: tuple or None. Shape of the first tensor.
      shape2: tuple or None. Shape of the second tensor.

    Returns:
      expected output shape when an element-wise operation is
      carried out on 2 tensors with shapes shape1 and shape2.
      tuple or None.

    Raises:
      ValueError: if shape1 and shape2 are not compatible for
        element-wise operations.
    """
    if None in [shape1, shape2]:
      return None
    elif len(shape1) < len(shape2):
      return self._compute_elemwise_op_output_shape(shape2, shape1)
    elif not shape2:
      return shape1
    output_shape = list(shape1[:-len(shape2)])
    for i, j in zip(shape1[-len(shape2):], shape2):
      if i is None or j is None:
        output_shape.append(None)
      elif i == 1:
        output_shape.append(j)
      elif j == 1:
        output_shape.append(i)
      else:
        if i != j:
          raise ValueError(
              'Operands could not be broadcast '
              'together with shapes ' + str(shape1) + ' ' + str(shape2))
        output_shape.append(i)
    return tuple(output_shape)
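
  # A quick sketch of the broadcasting rule implemented above, on
  # hypothetical shapes (illustration only, not part of the original module):
  #
  #   self._compute_elemwise_op_output_shape((2, 3, 1), (3, 4))
  #   # -> (2, 3, 4): the shorter shape aligns with the trailing axes,
  #   #    size-1 axes broadcast, and the leading 2 carries through.
  #   self._compute_elemwise_op_output_shape((2, None, 4), (3, 4))
  #   # -> (2, None, 4): a dynamic (None) dimension stays dynamic.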

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple):
      raise ValueError('A merge layer should be called on a list of inputs.')
    if len(input_shape) < 2:
      raise ValueError('A merge layer should be called '
                       'on a list of at least 2 inputs. '
                       'Got ' + str(len(input_shape)) + ' inputs.')
    batch_sizes = {s[0] for s in input_shape if s} - {None}
    if len(batch_sizes) > 1:
      raise ValueError(
          'Can not merge tensors with different '
          'batch sizes. Got tensors with shapes : ' + str(input_shape))
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
    # If the inputs have different ranks, we have to reshape them
    # to make them broadcastable.
    if None not in input_shape and len(set(map(len, input_shape))) == 1:
      self._reshape_required = False
    else:
      self._reshape_required = True

  def call(self, inputs):
    if not isinstance(inputs, (list, tuple)):
      raise ValueError('A merge layer should be called on a list of inputs.')
    if self._reshape_required:
      reshaped_inputs = []
      input_ndims = list(map(backend.ndim, inputs))
      if None not in input_ndims:
        # If ranks of all inputs are available,
        # we simply expand each of them at axis=1
        # until all of them have the same rank.
        max_ndim = max(input_ndims)
        for x in inputs:
          x_ndim = backend.ndim(x)
          for _ in range(max_ndim - x_ndim):
            x = array_ops.expand_dims(x, axis=1)
          reshaped_inputs.append(x)
        return self._merge_function(reshaped_inputs)
      else:
        # Transpose all inputs so that batch size is the last dimension.
        # (batch_size, dim1, dim2, ... ) -> (dim1, dim2, ... , batch_size)
        transposed = False
        for x in inputs:
          x_ndim = backend.ndim(x)
          if x_ndim is None:
            x_shape = array_ops.shape(x)
            batch_size = x_shape[0]
            new_shape = backend.concatenate(
                [x_shape[1:],
                 array_ops.expand_dims(batch_size, axis=-1)])
            x_transposed = array_ops.reshape(
                x,
                array_ops_stack.stack(
                    [batch_size, math_ops.reduce_prod(x_shape[1:])], axis=0))
            x_transposed = array_ops.transpose(x_transposed, perm=(1, 0))
            x_transposed = array_ops.reshape(x_transposed, new_shape)
            reshaped_inputs.append(x_transposed)
            transposed = True
          elif x_ndim > 1:
            dims = list(range(1, x_ndim)) + [0]
            reshaped_inputs.append(array_ops.transpose(x, perm=dims))
            transposed = True
          else:
            # We don't transpose inputs if they are 1D vectors or scalars.
            reshaped_inputs.append(x)
        y = self._merge_function(reshaped_inputs)
        y_ndim = backend.ndim(y)
        if transposed:
          # If inputs have been transposed, we have to transpose the output
          # too.
          if y_ndim is None:
            y_shape = array_ops.shape(y)
            y_ndim = array_ops.shape(y_shape)[0]
            batch_size = y_shape[y_ndim - 1]
            new_shape = backend.concatenate([
                array_ops.expand_dims(batch_size, axis=-1),
                y_shape[:y_ndim - 1]
            ])
            y = array_ops.reshape(y, (-1, batch_size))
            y = array_ops.transpose(y, perm=(1, 0))
            y = array_ops.reshape(y, new_shape)
          elif y_ndim > 1:
            dims = [y_ndim - 1] + list(range(y_ndim - 1))
            y = array_ops.transpose(y, perm=dims)
        return y
    else:
      return self._merge_function(inputs)
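
  # Sketch of the two branches above, on hypothetical shapes (illustration
  # only): with static ranks, merging a (batch, 5) tensor with a
  # (batch, 4, 5) tensor expands the first to (batch, 1, 5) via
  # expand_dims(axis=1), and the element-wise merge broadcasts both to
  # (batch, 4, 5). With an unknown rank, the batch axis is instead moved
  # last so the same broadcasting applies, and the result is transposed
  # back to batch-first.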

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if input_shape[0] is None:
      output_shape = None
    else:
      output_shape = input_shape[0][1:]
    for i in range(1, len(input_shape)):
      if input_shape[i] is None:
        shape = None
      else:
        shape = input_shape[i][1:]
      output_shape = self._compute_elemwise_op_output_shape(output_shape, shape)
    batch_sizes = {s[0] for s in input_shape if s is not None} - {None}
    if len(batch_sizes) == 1:
      output_shape = (list(batch_sizes)[0],) + output_shape
    else:
      output_shape = (None,) + output_shape
    return output_shape

  def compute_mask(self, inputs, mask=None):
    if mask is None:
      return None
    if not isinstance(mask, (tuple, list)):
      raise ValueError('`mask` should be a list.')
    if not isinstance(inputs, (tuple, list)):
      raise ValueError('`inputs` should be a list.')
    if len(mask) != len(inputs):
      raise ValueError('The lists `inputs` and `mask` '
                       'should have the same length.')
    if all(m is None for m in mask):
      return None
    masks = [array_ops.expand_dims(m, axis=0) for m in mask if m is not None]
    return backend.all(
        backend.concatenate(masks, axis=0), axis=0, keepdims=False)
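
  # Mask-merging sketch, on hypothetical values (illustration only): for
  # per-input masks [True, False] and [True, True], each mask is expanded on
  # a new axis 0, the two are concatenated to shape (2, 2), and all() over
  # axis 0 yields [True, False] -- a position survives only if every masked
  # input keeps it.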


@keras_export('keras.layers.Add')
class Add(_Merge):
  """Layer that adds a list of inputs.

  It takes as input a list of tensors,
  all of the same shape, and returns
  a single tensor (also of the same shape).

  Examples:

  >>> input_shape = (2, 3, 4)
  >>> x1 = tf.random.normal(input_shape)
  >>> x2 = tf.random.normal(input_shape)
  >>> y = tf.keras.layers.Add()([x1, x2])
  >>> print(y.shape)
  (2, 3, 4)

  Used in a functional model:

  >>> input1 = tf.keras.layers.Input(shape=(16,))
  >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
  >>> input2 = tf.keras.layers.Input(shape=(32,))
  >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
  >>> # equivalent to `added = tf.keras.layers.add([x1, x2])`
  >>> added = tf.keras.layers.Add()([x1, x2])
  >>> out = tf.keras.layers.Dense(4)(added)
  >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output += inputs[i]
    return output
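
# Broadcasting sketch (illustration only; the docstring above only promises
# same-shape inputs): because `_Merge.call` rank-matches its inputs, `Add`
# also accepts mixed-rank inputs, e.g.
#
#   a = tf.random.normal((2, 3, 4))
#   b = tf.random.normal((2, 4))          # expanded to (2, 1, 4) internally
#   tf.keras.layers.Add()([a, b]).shape   # -> TensorShape([2, 3, 4])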


@keras_export('keras.layers.Subtract')
class Subtract(_Merge):
  """Layer that subtracts two inputs.

  It takes as input a list of tensors of size 2,
  both of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),
  also of the same shape.

  Examples:

  ```python
      import keras

      input1 = keras.layers.Input(shape=(16,))
      x1 = keras.layers.Dense(8, activation='relu')(input1)
      input2 = keras.layers.Input(shape=(32,))
      x2 = keras.layers.Dense(8, activation='relu')(input2)
      # Equivalent to subtracted = keras.layers.subtract([x1, x2])
      subtracted = keras.layers.Subtract()([x1, x2])

      out = keras.layers.Dense(4)(subtracted)
      model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    super(Subtract, self).build(input_shape)
    if len(input_shape) != 2:
      raise ValueError('A `Subtract` layer should be called '
                       'on exactly 2 inputs')

  def _merge_function(self, inputs):
    if len(inputs) != 2:
      raise ValueError('A `Subtract` layer should be called '
                       'on exactly 2 inputs')
    return inputs[0] - inputs[1]
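
# Order matters in `Subtract`: the result is inputs[0] - inputs[1]. A tiny
# sketch with hypothetical values (illustration only):
#
#   a = tf.constant([[5.0, 2.0]])
#   b = tf.constant([[2.0, 2.0]])
#   tf.keras.layers.Subtract()([a, b])   # -> [[3., 0.]]
#   tf.keras.layers.Subtract()([b, a])   # -> [[-3., 0.]]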


@keras_export('keras.layers.Multiply')
class Multiply(_Merge):
  """Layer that multiplies (element-wise) a list of inputs.

  It takes as input a list of tensors, all of the same shape, and returns
  a single tensor (also of the same shape).

  >>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),
  ...                             np.arange(5, 10).reshape(5, 1)])
  <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
  array([[ 0],
         [ 6],
         [14],
         [24],
         [36]])>

  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> multiplied = tf.keras.layers.Multiply()([x1, x2])
  >>> multiplied.shape
  TensorShape([5, 8])
  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output = output * inputs[i]
    return output


@keras_export('keras.layers.Average')
class Average(_Merge):
  """Layer that averages a list of inputs element-wise.

  It takes as input a list of tensors, all of the same shape, and returns
  a single tensor (also of the same shape).

  Example:

  >>> x1 = np.ones((2, 2))
  >>> x2 = np.zeros((2, 2))
  >>> y = tf.keras.layers.Average()([x1, x2])
  >>> y.numpy().tolist()
  [[0.5, 0.5], [0.5, 0.5]]

  Usage in a functional model:

  >>> input1 = tf.keras.layers.Input(shape=(16,))
  >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
  >>> input2 = tf.keras.layers.Input(shape=(32,))
  >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
  >>> avg = tf.keras.layers.Average()([x1, x2])
  >>> out = tf.keras.layers.Dense(4)(avg)
  >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

  Raises:
    ValueError: If there is a shape mismatch between the inputs and the shapes
      cannot be broadcasted to match.
  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output += inputs[i]
    return output / len(inputs)
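
# Subclassing sketch: the `_Merge` machinery makes a new element-wise merge
# one method long. A hypothetical geometric-mean layer (illustration only,
# not part of this module) could look like:
#
#   class _GeometricMean(_Merge):
#
#     def _merge_function(self, inputs):
#       log_sum = math_ops.log(inputs[0])
#       for i in range(1, len(inputs)):
#         log_sum += math_ops.log(inputs[i])
#       return math_ops.exp(log_sum / len(inputs))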


@keras_export('keras.layers.Maximum')
class Maximum(_Merge):
  """Layer that computes the maximum (element-wise) of a list of inputs.

  It takes as input a list of tensors, all of the same shape, and returns
  a single tensor (also of the same shape).

  >>> tf.keras.layers.Maximum()([np.arange(5).reshape(5, 1),
  ...                            np.arange(5, 10).reshape(5, 1)])
  <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
  array([[5],
         [6],
         [7],
         [8],
         [9]])>

  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> maxed = tf.keras.layers.Maximum()([x1, x2])
  >>> maxed.shape
  TensorShape([5, 8])
  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output = math_ops.maximum(output, inputs[i])
    return output


@keras_export('keras.layers.Minimum')
class Minimum(_Merge):
  """Layer that computes the minimum (element-wise) of a list of inputs.

  It takes as input a list of tensors, all of the same shape, and returns
  a single tensor (also of the same shape).

  >>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),
  ...                            np.arange(5, 10).reshape(5, 1)])
  <tf.Tensor: shape=(5, 1), dtype=int64, numpy=
  array([[0],
         [1],
         [2],
         [3],
         [4]])>

  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> minned = tf.keras.layers.Minimum()([x1, x2])
  >>> minned.shape
  TensorShape([5, 8])
  """

  def _merge_function(self, inputs):
    output = inputs[0]
    for i in range(1, len(inputs)):
      output = math_ops.minimum(output, inputs[i])
    return output


@keras_export('keras.layers.Concatenate')
class Concatenate(_Merge):
  """Layer that concatenates a list of inputs.

  It takes as input a list of tensors, all of the same shape except
  for the concatenation axis, and returns a single tensor that is the
  concatenation of all inputs.

  >>> x = np.arange(20).reshape(2, 2, 5)
  >>> print(x)
  [[[ 0  1  2  3  4]
    [ 5  6  7  8  9]]
   [[10 11 12 13 14]
    [15 16 17 18 19]]]
  >>> y = np.arange(20, 30).reshape(2, 1, 5)
  >>> print(y)
  [[[20 21 22 23 24]]
   [[25 26 27 28 29]]]
  >>> tf.keras.layers.Concatenate(axis=1)([x, y])
  <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
  array([[[ 0,  1,  2,  3,  4],
          [ 5,  6,  7,  8,  9],
          [20, 21, 22, 23, 24]],
         [[10, 11, 12, 13, 14],
          [15, 16, 17, 18, 19],
          [25, 26, 27, 28, 29]]])>

  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> concatted = tf.keras.layers.Concatenate()([x1, x2])
  >>> concatted.shape
  TensorShape([5, 16])

  """

  def __init__(self, axis=-1, **kwargs):
    """Instantiates a Concatenate layer.

    >>> x = np.arange(20).reshape(2, 2, 5)
    >>> print(x)
    [[[ 0  1  2  3  4]
      [ 5  6  7  8  9]]
     [[10 11 12 13 14]
      [15 16 17 18 19]]]
    >>> y = np.arange(20, 30).reshape(2, 1, 5)
    >>> print(y)
    [[[20 21 22 23 24]]
     [[25 26 27 28 29]]]
    >>> tf.keras.layers.Concatenate(axis=1)([x, y])
    <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
    array([[[ 0,  1,  2,  3,  4],
            [ 5,  6,  7,  8,  9],
            [20, 21, 22, 23, 24]],
           [[10, 11, 12, 13, 14],
            [15, 16, 17, 18, 19],
            [25, 26, 27, 28, 29]]])>

    Args:
      axis: Axis along which to concatenate.
      **kwargs: standard layer keyword arguments.
    """
    super(Concatenate, self).__init__(**kwargs)
    self.axis = axis
    self.supports_masking = True
    self._reshape_required = False

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple) or len(input_shape) < 1:
      raise ValueError('A `Concatenate` layer should be called '
                       'on a list of at least 1 input.')
    if all(shape is None for shape in input_shape):
      return
    reduced_inputs_shapes = [list(shape) for shape in input_shape]
    shape_set = set()
    for i in range(len(reduced_inputs_shapes)):
      del reduced_inputs_shapes[i][self.axis]
      shape_set.add(tuple(reduced_inputs_shapes[i]))

    if len(shape_set) != 1:
      err_msg = ('A `Concatenate` layer requires inputs with matching shapes '
                 'except for the concat axis. Got inputs shapes: %s' %
                 input_shape)
      # Make sure all the shapes have the same rank.
      ranks = set(len(shape) for shape in shape_set)
      if len(ranks) != 1:
        raise ValueError(err_msg)
      # Get the only rank for the set.
      (rank,) = ranks
      for axis in range(rank):
        # Skip the Nones in the shape since they are dynamic, also the axis
        # for concat has been removed above.
        unique_dims = set(
            shape[axis] for shape in shape_set if shape[axis] is not None)
        if len(unique_dims) > 1:
          raise ValueError(err_msg)

  def _merge_function(self, inputs):
    return backend.concatenate(inputs, axis=self.axis)

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if ((not isinstance(input_shape, (tuple, list))) or
        (not isinstance(input_shape[0], (tuple, list)))):
      # The tf_utils.shape_type_conversion decorator turns tensorshapes
      # into tuples, so we need to verify that `input_shape` is a list/tuple,
      # *and* that the individual elements are themselves shape tuples.
      raise ValueError('A `Concatenate` layer should be called '
                       'on a list of inputs.')
    input_shapes = input_shape
    output_shape = list(input_shapes[0])
    for shape in input_shapes[1:]:
      if output_shape[self.axis] is None or shape[self.axis] is None:
        output_shape[self.axis] = None
        break
      output_shape[self.axis] += shape[self.axis]
    return tuple(output_shape)

  def compute_mask(self, inputs, mask=None):
    if mask is None:
      return None
    if not isinstance(mask, (tuple, list)):
      raise ValueError('`mask` should be a list.')
    if not isinstance(inputs, (tuple, list)):
      raise ValueError('`inputs` should be a list.')
    if len(mask) != len(inputs):
      raise ValueError('The lists `inputs` and `mask` '
                       'should have the same length.')
    if all(m is None for m in mask):
      return None
    # Make a list of masks while making sure
    # the dimensionality of each mask
    # is the same as the corresponding input.
    masks = []
    for input_i, mask_i in zip(inputs, mask):
      if mask_i is None:
        # Input is unmasked. Append all 1s to masks.
        masks.append(array_ops.ones_like(input_i, dtype='bool'))
      elif backend.ndim(mask_i) < backend.ndim(input_i):
        # Mask is smaller than the input, expand it.
        masks.append(array_ops.expand_dims(mask_i, axis=-1))
      else:
        masks.append(mask_i)
    concatenated = backend.concatenate(masks, axis=self.axis)
    return backend.all(concatenated, axis=-1, keepdims=False)

  def get_config(self):
    config = {
        'axis': self.axis,
    }
    base_config = super(Concatenate, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
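
# Config round-trip sketch (standard Keras layer serialization; values are
# illustrative): `get_config` above extends the base config with `axis`, so
# an equivalent layer can be rebuilt from the config alone:
#
#   layer = Concatenate(axis=1)
#   config = layer.get_config()              # includes {'axis': 1, ...}
#   clone = Concatenate.from_config(config)  # equivalent layer, axis=1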


@keras_export('keras.layers.Dot')
class Dot(_Merge):
  """Layer that computes a dot product between samples in two tensors.

  E.g. if applied to a list of two tensors `a` and `b` of shape
  `(batch_size, n)`, the output will be a tensor of shape `(batch_size, 1)`
  where each entry `i` will be the dot product between
  `a[i]` and `b[i]`.

  >>> x = np.arange(10).reshape(1, 5, 2)
  >>> print(x)
  [[[0 1]
    [2 3]
    [4 5]
    [6 7]
    [8 9]]]
  >>> y = np.arange(10, 20).reshape(1, 2, 5)
  >>> print(y)
  [[[10 11 12 13 14]
    [15 16 17 18 19]]]
  >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
  <tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
  array([[[260, 360],
          [320, 445]]])>

  >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
  >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
  >>> dotted = tf.keras.layers.Dot(axes=1)([x1, x2])
  >>> dotted.shape
  TensorShape([5, 1])

  """

  def __init__(self, axes, normalize=False, **kwargs):
    """Initializes a layer that computes the dot product between samples.

    >>> x = np.arange(10).reshape(1, 5, 2)
    >>> print(x)
    [[[0 1]
      [2 3]
      [4 5]
      [6 7]
      [8 9]]]
    >>> y = np.arange(10, 20).reshape(1, 2, 5)
    >>> print(y)
    [[[10 11 12 13 14]
      [15 16 17 18 19]]]
    >>> tf.keras.layers.Dot(axes=(1, 2))([x, y])
    <tf.Tensor: shape=(1, 2, 2), dtype=int64, numpy=
    array([[[260, 360],
            [320, 445]]])>

    Args:
      axes: Integer or tuple of integers,
        axis or axes along which to take the dot product. If a tuple, should
        be two integers corresponding to the desired axis from the first input
        and the desired axis from the second input, respectively. Note that
        the size of the two selected axes must match.
      normalize: Whether to L2-normalize samples along the
        dot product axis before taking the dot product.
        If set to True, then the output of the dot product
        is the cosine proximity between the two samples.
      **kwargs: Standard layer keyword arguments.
    """
    super(Dot, self).__init__(**kwargs)
    if not isinstance(axes, int):
      if not isinstance(axes, (list, tuple)):
        raise TypeError('Invalid type for `axes` - '
                        'should be a list or an int.')
      if len(axes) != 2:
        raise ValueError('Invalid format for `axes` - '
                         'should contain two elements.')
      if not isinstance(axes[0], int) or not isinstance(axes[1], int):
        raise ValueError('Invalid format for `axes` - '
                         'list elements should be "int".')
    self.axes = axes
    self.normalize = normalize
    self.supports_masking = True
    self._reshape_required = False

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Used purely for shape validation.
    if not isinstance(input_shape[0], tuple) or len(input_shape) != 2:
      raise ValueError('A `Dot` layer should be called '
                       'on a list of 2 inputs.')
    shape1 = input_shape[0]
    shape2 = input_shape[1]
    if shape1 is None or shape2 is None:
      return
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % len(shape1), self.axes % len(shape2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = self.axes
    if shape1[axes[0]] != shape2[axes[1]]:
      raise ValueError('Dimension incompatibility '
                       '%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
                       'Layer shapes: %s, %s. ' % (shape1, shape2) +
                       'Chosen axes: %s, %s' % (axes[0], axes[1]))

  def _merge_function(self, inputs):
    base_layer_utils.no_ragged_support(inputs, self.name)
    if len(inputs) != 2:
      raise ValueError('A `Dot` layer should be called on exactly 2 inputs')
    x1 = inputs[0]
    x2 = inputs[1]
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % backend.ndim(x1), self.axes % backend.ndim(x2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = []
      for i in range(len(self.axes)):
        if self.axes[i] < 0:
          axes.append(self.axes[i] % backend.ndim(inputs[i]))
        else:
          axes.append(self.axes[i])
    if self.normalize:
      x1 = nn.l2_normalize(x1, axis=axes[0])
      x2 = nn.l2_normalize(x2, axis=axes[1])
    output = backend.batch_dot(x1, x2, axes)
    return output

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if not isinstance(input_shape, (tuple, list)) or len(input_shape) != 2:
      raise ValueError('A `Dot` layer should be called '
                       'on a list of 2 inputs.')
    shape1 = list(input_shape[0])
    shape2 = list(input_shape[1])
    if isinstance(self.axes, int):
      if self.axes < 0:
        axes = [self.axes % len(shape1), self.axes % len(shape2)]
      else:
        axes = [self.axes] * 2
    else:
      axes = self.axes
    shape1.pop(axes[0])
    shape2.pop(axes[1])
    shape2.pop(0)
    output_shape = shape1 + shape2
    if len(output_shape) == 1:
      output_shape += [1]
    return tuple(output_shape)

  def compute_mask(self, inputs, mask=None):
    return None

  def get_config(self):
    config = {
        'axes': self.axes,
        'normalize': self.normalize,
    }
    base_config = super(Dot, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
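
# With `normalize=True`, the dot product above becomes cosine similarity. A
# sketch with hypothetical vectors (illustration only):
#
#   a = tf.constant([[1.0, 0.0]])
#   b = tf.constant([[1.0, 1.0]])
#   tf.keras.layers.Dot(axes=1, normalize=True)([a, b])
#   # -> [[0.7071...]] (cos 45 degrees); output shape (1, 1)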


@keras_export('keras.layers.add')
def add(inputs, **kwargs):
  """Functional interface to the `tf.keras.layers.Add` layer.

  Args:
    inputs: A list of input tensors (at least 2) with the same shape.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor as the sum of the inputs. It has the same shape as the inputs.

  Examples:

  >>> input_shape = (2, 3, 4)
  >>> x1 = tf.random.normal(input_shape)
  >>> x2 = tf.random.normal(input_shape)
  >>> y = tf.keras.layers.add([x1, x2])
  >>> print(y.shape)
  (2, 3, 4)

  Used in a functional model:

  >>> input1 = tf.keras.layers.Input(shape=(16,))
  >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
  >>> input2 = tf.keras.layers.Input(shape=(32,))
  >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
  >>> added = tf.keras.layers.add([x1, x2])
  >>> out = tf.keras.layers.Dense(4)(added)
  >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

  """
  return Add(**kwargs)(inputs)


@keras_export('keras.layers.subtract')
def subtract(inputs, **kwargs):
  """Functional interface to the `Subtract` layer.

  Args:
    inputs: A list of input tensors (exactly 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the difference of the inputs.

  Examples:

  ```python
      import keras

      input1 = keras.layers.Input(shape=(16,))
      x1 = keras.layers.Dense(8, activation='relu')(input1)
      input2 = keras.layers.Input(shape=(32,))
      x2 = keras.layers.Dense(8, activation='relu')(input2)
      subtracted = keras.layers.subtract([x1, x2])

      out = keras.layers.Dense(4)(subtracted)
      model = keras.models.Model(inputs=[input1, input2], outputs=out)
  ```
  """
  return Subtract(**kwargs)(inputs)


@keras_export('keras.layers.multiply')
def multiply(inputs, **kwargs):
  """Functional interface to the `Multiply` layer.

  Example:

  >>> x1 = np.arange(3.0)
  >>> x2 = np.arange(3.0)
  >>> tf.keras.layers.multiply([x1, x2])
  <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 1., 4.], ...)>

  Usage in a functional model:

  >>> input1 = tf.keras.layers.Input(shape=(16,))
  >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)  # shape=(None, 8)
  >>> input2 = tf.keras.layers.Input(shape=(32,))
  >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)  # shape=(None, 8)
  >>> out = tf.keras.layers.multiply([x1, x2])  # shape=(None, 8)
  >>> out = tf.keras.layers.Dense(4)(out)
  >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the element-wise product of the inputs.
  """
  return Multiply(**kwargs)(inputs)


@keras_export('keras.layers.average')
def average(inputs, **kwargs):
  """Functional interface to the `tf.keras.layers.Average` layer.

  Example:

  >>> x1 = np.ones((2, 2))
  >>> x2 = np.zeros((2, 2))
  >>> y = tf.keras.layers.Average()([x1, x2])
  >>> y.numpy().tolist()
  [[0.5, 0.5], [0.5, 0.5]]

  Usage in a functional model:

  >>> input1 = tf.keras.layers.Input(shape=(16,))
  >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)
  >>> input2 = tf.keras.layers.Input(shape=(32,))
  >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)
  >>> avg = tf.keras.layers.Average()([x1, x2])
  >>> out = tf.keras.layers.Dense(4)(avg)
  >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the average of the inputs.

  Raises:
    ValueError: If there is a shape mismatch between the inputs and the shapes
      cannot be broadcasted to match.
  """
  return Average(**kwargs)(inputs)


@keras_export('keras.layers.maximum')
def maximum(inputs, **kwargs):
  """Functional interface to compute the element-wise maximum of `inputs`.

  This is equivalent to the `tf.keras.layers.Maximum` layer.

  For example:

  ```python
  input1 = tf.keras.layers.Input(shape=(16,))
  x1 = tf.keras.layers.Dense(8, activation='relu')(input1)  # shape=(None, 8)
  input2 = tf.keras.layers.Input(shape=(32,))
  x2 = tf.keras.layers.Dense(8, activation='relu')(input2)  # shape=(None, 8)
  max_inp = tf.keras.layers.maximum([x1, x2])  # shape=(None, 8)
  out = tf.keras.layers.Dense(4)(max_inp)
  model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
  ```

  Args:
    inputs: A list of input tensors (at least 2) of the same shape.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor (of the same shape as the input tensors) with the element-wise
    maximum of the inputs.

  Raises:
    ValueError: If input tensors are of different shape.
  """
  return Maximum(**kwargs)(inputs)


@keras_export('keras.layers.minimum')
def minimum(inputs, **kwargs):
  """Functional interface to the `Minimum` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the element-wise minimum of the inputs.
  """
  return Minimum(**kwargs)(inputs)
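
# Usage sketch for `minimum`, mirroring the `maximum` example above
# (hypothetical values, illustration only):
#
#   a = np.array([[1.0, 4.0]])
#   b = np.array([[2.0, 3.0]])
#   tf.keras.layers.minimum([a, b])   # -> [[1., 3.]], element-wise minimum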


@keras_export('keras.layers.concatenate')
def concatenate(inputs, axis=-1, **kwargs):
  """Functional interface to the `Concatenate` layer.

  >>> x = np.arange(20).reshape(2, 2, 5)
  >>> print(x)
  [[[ 0  1  2  3  4]
    [ 5  6  7  8  9]]
   [[10 11 12 13 14]
    [15 16 17 18 19]]]
  >>> y = np.arange(20, 30).reshape(2, 1, 5)
  >>> print(y)
  [[[20 21 22 23 24]]
   [[25 26 27 28 29]]]
  >>> tf.keras.layers.concatenate([x, y],
  ...                             axis=1)
  <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=
  array([[[ 0,  1,  2,  3,  4],
          [ 5,  6,  7,  8,  9],
          [20, 21, 22, 23, 24]],
         [[10, 11, 12, 13, 14],
          [15, 16, 17, 18, 19],
          [25, 26, 27, 28, 29]]])>

  Args:
    inputs: A list of input tensors (at least 2).
    axis: Concatenation axis.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the concatenation of the inputs alongside axis `axis`.
  """
  return Concatenate(axis=axis, **kwargs)(inputs)


@keras_export('keras.layers.dot')
def dot(inputs, axes, normalize=False, **kwargs):
  """Functional interface to the `Dot` layer.

  Args:
    inputs: A list of input tensors (at least 2).
    axes: Integer or tuple of integers,
      axis or axes along which to take the dot product.
    normalize: Whether to L2-normalize samples along the
      dot product axis before taking the dot product.
      If set to True, then the output of the dot product
      is the cosine proximity between the two samples.
    **kwargs: Standard layer keyword arguments.

  Returns:
    A tensor, the dot product of the samples from the inputs.
  """
  return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)
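
# Usage sketch for `dot` (hypothetical values, illustration only): a batched
# dot of two (batch, n) inputs over axis 1 has shape (batch, 1):
#
#   a = np.arange(10).reshape(5, 2)
#   b = np.arange(10, 20).reshape(5, 2)
#   tf.keras.layers.dot([a, b], axes=1)   # output shape (5, 1)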