Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/botocore/retryhandler.py: 24%
164 statements
« prev ^ index » next — coverage.py v7.3.2, created at 2023-12-08 06:51 +0000
1# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3#
4# Licensed under the Apache License, Version 2.0 (the "License"). You
5# may not use this file except in compliance with the License. A copy of
6# the License is located at
7#
8# http://aws.amazon.com/apache2.0/
9#
10# or in the "license" file accompanying this file. This file is
11# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12# ANY KIND, either express or implied. See the License for the specific
13# language governing permissions and limitations under the License.
15import functools
16import logging
17import random
18from binascii import crc32
20from botocore.exceptions import (
21 ChecksumError,
22 ConnectionClosedError,
23 ConnectionError,
24 EndpointConnectionError,
25 ReadTimeoutError,
26)
# Module-level logger; retry decisions are traced at DEBUG level.
logger = logging.getLogger(__name__)

# The only supported error for now is GENERAL_CONNECTION_ERROR
# which maps to requests generic ConnectionError. If we're able
# to get more specific exceptions from requests we can update
# this mapping with more specific exceptions.
# Maps a policy's symbolic error name to the concrete botocore
# exception classes treated as retryable for that name.
EXCEPTION_MAP = {
    'GENERAL_CONNECTION_ERROR': [
        ConnectionError,
        ConnectionClosedError,
        ReadTimeoutError,
        EndpointConnectionError,
    ],
}
def delay_exponential(base, growth_factor, attempts):
    """Compute an exponential-backoff sleep time.

    The delay is ``base * growth_factor ** (attempts - 1)``.  Passing the
    string ``'rand'`` as ``base`` substitutes a random float in ``[0, 1)``;
    a numeric base must be strictly positive.

    :param base: Numeric base of the backoff, or the string ``'rand'``.
    :param growth_factor: Multiplier applied per additional attempt.
    :param attempts: 1-based attempt count.
    :return: Seconds to sleep before the next attempt.
    :raises ValueError: if ``base`` is numeric and not greater than zero.
    """
    scale = growth_factor ** (attempts - 1)
    if base == 'rand':
        return random.random() * scale
    if base <= 0:
        raise ValueError(
            f"The 'base' param must be greater than 0, got: {base}"
        )
    return base * scale
def create_exponential_delay_function(base, growth_factor):
    """Pre-bind ``base`` and ``growth_factor`` into a delay callable.

    The returned callable only needs the ``attempts`` keyword argument,
    which it forwards to :func:`delay_exponential`.
    """
    return functools.partial(
        delay_exponential, base=base, growth_factor=growth_factor
    )
def create_retry_handler(config, operation_name=None):
    """Build a RetryHandler (checker + action) from a retry config."""
    return RetryHandler(
        checker=create_checker_from_retry_config(
            config, operation_name=operation_name
        ),
        action=create_retry_action_from_config(
            config, operation_name=operation_name
        ),
    )
def create_retry_action_from_config(config, operation_name=None):
    """Create the retry action (delay function) for a service config.

    The spec has the possibility of supporting per-policy actions, but for
    now the delay settings always come from the ``__default__`` section and
    apply to every policy in the retry config (per service).  Returns None
    for any delay type other than ``'exponential'``.
    """
    delay_config = config['__default__']['delay']
    if delay_config['type'] != 'exponential':
        return None
    return create_exponential_delay_function(
        base=delay_config['base'],
        growth_factor=delay_config['growth_factor'],
    )
def create_checker_from_retry_config(config, operation_name=None):
    """Assemble the retry checker described by ``config``.

    Gathers the ``__default__`` policies plus any policies registered for
    ``operation_name``, then wraps everything in a MaxAttemptsDecorator.
    """
    checkers = []
    retryable_exceptions = []
    max_attempts = None

    def _collect(policies):
        # Turn each policy into a checker and accumulate any exception
        # types it declares as retryable.
        for policy in policies.values():
            checkers.append(_create_single_checker(policy))
            exceptions = _extract_retryable_exception(policy)
            if exceptions is not None:
                retryable_exceptions.extend(exceptions)

    if '__default__' in config:
        defaults = config['__default__']
        max_attempts = defaults['max_attempts']
        _collect(defaults.get('policies', {}))
    if operation_name is not None and config.get(operation_name) is not None:
        _collect(config[operation_name]['policies'])

    if len(checkers) == 1:
        # Don't need to use a MultiChecker
        return MaxAttemptsDecorator(checkers[0], max_attempts=max_attempts)
    return MaxAttemptsDecorator(
        MultiChecker(checkers),
        max_attempts=max_attempts,
        retryable_exceptions=tuple(retryable_exceptions),
    )
def _create_single_checker(config):
    """Create one checker from a policy's ``applies_when`` clause.

    Returns None when the clause mentions neither ``response`` nor
    ``socket_errors``.
    """
    applies_when = config['applies_when']
    if 'response' in applies_when:
        return _create_single_response_checker(applies_when['response'])
    if 'socket_errors' in applies_when:
        return ExceptionRaiser()
144def _create_single_response_checker(response):
145 if 'service_error_code' in response:
146 checker = ServiceErrorCodeChecker(
147 status_code=response['http_status_code'],
148 error_code=response['service_error_code'],
149 )
150 elif 'http_status_code' in response:
151 checker = HTTPStatusCodeChecker(
152 status_code=response['http_status_code']
153 )
154 elif 'crc32body' in response:
155 checker = CRC32Checker(header=response['crc32body'])
156 else:
157 # TODO: send a signal.
158 raise ValueError("Unknown retry policy")
159 return checker
def _extract_retryable_exception(config):
    """Return the exception classes a policy treats as retryable.

    Returns None when the policy declares no retryable exceptions.
    """
    applies_when = config['applies_when']
    if 'crc32body' in applies_when.get('response', {}):
        return [ChecksumError]
    if 'socket_errors' in applies_when:
        return [
            exc
            for name in applies_when['socket_errors']
            for exc in EXCEPTION_MAP[name]
        ]
class RetryHandler:
    """Decide whether a retry is needed and what action to take.

    Takes two callables: ``checker`` decides, from the response (or caught
    exception) and the attempt number, whether the retry criteria are met;
    when they are, ``action`` determines what should happen for the retry
    (e.g. the delay to apply).
    """

    def __init__(self, checker, action):
        self._checker = checker
        self._action = action

    def __call__(self, attempts, response, caught_exception, **kwargs):
        """Process a retry.

        Intended to be hooked up to an event handler (hence the **kwargs).
        """
        checker_kwargs = {
            'attempt_number': attempts,
            'response': response,
            'caught_exception': caught_exception,
        }
        # A MaxAttemptsDecorator additionally wants the per-request
        # retry context so it can record the attempt cap.
        if isinstance(self._checker, MaxAttemptsDecorator):
            retries_context = kwargs['request_dict']['context'].get('retries')
            checker_kwargs['retries_context'] = retries_context
        if not self._checker(**checker_kwargs):
            logger.debug("No retry needed.")
            return None
        result = self._action(attempts=attempts)
        logger.debug("Retry needed, action of: %s", result)
        return result
class BaseChecker:
    """Base class for retry checkers.

    Each subclass is responsible for checking a single criterion that
    determines whether or not a retry should happen.
    """

    def __call__(self, attempt_number, response, caught_exception):
        """Dispatch to the response or exception check.

        Exactly one of ``response`` / ``caught_exception`` is expected to
        be non-None, so subclasses never have to null-check themselves.

        :type attempt_number: int
        :param attempt_number: Total number of times we've attempted to
            send the request.
        :param response: The HTTP response, if one was received.
        :type caught_exception: Exception
        :param caught_exception: Exception caught while trying to send
            the HTTP request.
        :return: True if the retry criteria match (and therefore a retry
            should occur), False otherwise.
        :raises ValueError: if both arguments are None.
        """
        if response is not None:
            return self._check_response(attempt_number, response)
        if caught_exception is not None:
            return self._check_caught_exception(
                attempt_number, caught_exception
            )
        raise ValueError("Both response and caught_exception are None.")

    def _check_response(self, attempt_number, response):
        # Default: no opinion (falsy); subclasses override.
        pass

    def _check_caught_exception(self, attempt_number, caught_exception):
        # Default: no opinion (falsy); subclasses override.
        pass
class MaxAttemptsDecorator(BaseChecker):
    """Cap a wrapped retry checker at a maximum number of attempts.

    Calls pass through to the decorated checker while the attempt count
    is below ``max_attempts``; during that window any of the configured
    ``retryable_exceptions`` raised by the wrapped checker count as
    "retry needed".  Once the cap is reached, False is returned and
    previously-caught exception types are allowed to propagate.
    """

    def __init__(self, checker, max_attempts, retryable_exceptions=None):
        self._checker = checker
        self._max_attempts = max_attempts
        self._retryable_exceptions = retryable_exceptions

    def __call__(
        self, attempt_number, response, caught_exception, retries_context
    ):
        if retries_context:
            # Record the largest attempt cap seen in the request context.
            retries_context['max'] = max(
                retries_context.get('max', 0), self._max_attempts
            )

        should_retry = self._should_retry(
            attempt_number, response, caught_exception
        )
        if not should_retry:
            return False
        if attempt_number < self._max_attempts:
            return should_retry
        # Cap reached: explicitly flag MaxAttemptsReached in the parsed
        # response metadata when we have one.
        if response is not None and 'ResponseMetadata' in response[1]:
            response[1]['ResponseMetadata']['MaxAttemptsReached'] = True
        logger.debug(
            "Reached the maximum number of retry attempts: %s",
            attempt_number,
        )
        return False

    def _should_retry(self, attempt_number, response, caught_exception):
        # Only swallow retryable exceptions while attempts remain; once
        # we've exceeded the max attempts, let any exception propagate.
        if not self._retryable_exceptions or (
            attempt_number >= self._max_attempts
        ):
            return self._checker(attempt_number, response, caught_exception)
        try:
            return self._checker(attempt_number, response, caught_exception)
        except self._retryable_exceptions as e:
            logger.debug(
                "retry needed, retryable exception caught: %s",
                e,
                exc_info=True,
            )
            return True
class HTTPStatusCodeChecker(BaseChecker):
    """Retry when the HTTP status code equals a configured value."""

    def __init__(self, status_code):
        self._status_code = status_code

    def _check_response(self, attempt_number, response):
        matched = response[0].status_code == self._status_code
        if matched:
            logger.debug(
                "retry needed: retryable HTTP status code received: %s",
                self._status_code,
            )
        return matched
class ServiceErrorCodeChecker(BaseChecker):
    """Retry on a specific (HTTP status code, service error code) pair."""

    def __init__(self, status_code, error_code):
        self._status_code = status_code
        self._error_code = error_code

    def _check_response(self, attempt_number, response):
        if response[0].status_code != self._status_code:
            return False
        # response[1] is the parsed response body.
        if response[1].get('Error', {}).get('Code') != self._error_code:
            return False
        logger.debug(
            "retry needed: matching HTTP status and error code seen: "
            "%s, %s",
            self._status_code,
            self._error_code,
        )
        return True
class MultiChecker(BaseChecker):
    """Combine several checkers; the first truthy result wins."""

    def __init__(self, checkers):
        self._checkers = checkers

    def __call__(self, attempt_number, response, caught_exception):
        for checker in self._checkers:
            outcome = checker(attempt_number, response, caught_exception)
            if outcome:
                return outcome
        return False
class CRC32Checker(BaseChecker):
    """Verify the response body against an expected CRC32 header.

    A mismatch raises ChecksumError, which the retry machinery can be
    configured to treat as retryable.
    """

    def __init__(self, header):
        # The header where the expected crc32 is located.
        self._header_name = header

    def _check_response(self, attempt_number, response):
        http_response = response[0]
        expected_crc = http_response.headers.get(self._header_name)
        if expected_crc is None:
            logger.debug(
                "crc32 check skipped, the %s header is not "
                "in the http response.",
                self._header_name,
            )
            return
        # Mask to 32 bits for consistency across platforms.
        actual_crc32 = crc32(http_response.content) & 0xFFFFFFFF
        if actual_crc32 != int(expected_crc):
            logger.debug(
                "retry needed: crc32 check failed, expected != actual: "
                "%s != %s",
                int(expected_crc),
                actual_crc32,
            )
            raise ChecksumError(
                checksum_type='crc32',
                expected_checksum=int(expected_crc),
                actual_checksum=actual_crc32,
            )
class ExceptionRaiser(BaseChecker):
    """Raise any non-None caught exception.

    This is implementation specific, but the class is useful in
    coordination with the MaxAttemptsDecorator: the decorator holds the
    list of exception types it should catch and retry, but something has
    to actually raise the caught exception for that list to take effect.
    That is this class's job.  If the MaxAttemptsDecorator is not
    interested in retrying the exception, it simply propagates out past
    the retry code.
    """

    def _check_caught_exception(self, attempt_number, caught_exception):
        raise caught_exception