Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/training/gradient_descent.py: 62%

26 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""GradientDescent for TensorFlow."""
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.GradientDescentOptimizer"])

class GradientDescentOptimizer(optimizer.Optimizer):
  """Optimizer that implements the gradient descent algorithm.
  """


  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.

    Args:
      learning_rate: A Tensor or a floating point value. The learning
        rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".

    @compatibility(eager)
    When eager execution is enabled, `learning_rate` can be a callable that
    takes no arguments and returns the actual value to use. This can be useful
    for changing these values across different invocations of optimizer
    functions.
    @end_compatibility
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._learning_rate_tensor = None
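    # Example (illustrative): under eager execution, `learning_rate` may be a
    # zero-argument callable, e.g.
    #   opt = GradientDescentOptimizer(learning_rate=lambda: 0.1)
    # The callable is resolved to a tensor in `_prepare` each time gradients
    # are applied.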


  def _apply_dense(self, grad, var):
    return training_ops.apply_gradient_descent(
        var,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking).op

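  # Same dense update as above, but for resource variables, which are
  # addressed via their handle rather than as refs.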

  def _resource_apply_dense(self, grad, handle):
    return training_ops.resource_apply_gradient_descent(
        handle.handle, math_ops.cast(self._learning_rate_tensor,
                                     grad.dtype.base_dtype),
        grad, use_locking=self._use_locking)

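  # Sparse update for resource variables when `indices` may contain
  # duplicates: scatter-adding `-learning_rate * grad` lets repeated indices
  # accumulate their contributions.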

  def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    return resource_variable_ops.resource_scatter_add(
        handle.handle,
        indices,
        -grad * math_ops.cast(self._learning_rate_tensor,
                              grad.dtype.base_dtype))

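  # Sparse update for ref variables: build an IndexedSlices delta of
  # `learning_rate * grad.values` and subtract it with `scatter_sub`, which
  # also handles duplicate indices by accumulating their contributions.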

  def _apply_sparse_duplicate_indices(self, grad, var):
    delta = indexed_slices.IndexedSlices(
        grad.values *
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)

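  # Called before gradients are applied: resolves `learning_rate` if it was
  # given as a callable (see the eager note in `__init__`) and converts it to
  # a tensor.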

  def _prepare(self):
    learning_rate = self._call_if_callable(self._learning_rate)
    self._learning_rate_tensor = ops.convert_to_tensor(
        learning_rate, name="learning_rate")